LUCENE-1257: Replace StringBuffer by StringBuilder where possible

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@821185 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2009-10-02 22:11:10 +00:00
parent 27b05ac971
commit af0e97fd72
111 changed files with 238 additions and 231 deletions
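For context: the change is mechanical. StringBuilder (added in Java 5) exposes the same API as StringBuffer but performs no per-call synchronization, so it is the better choice whenever a buffer is confined to a single thread. A minimal before/after sketch of the pattern applied throughout this patch, using illustrative names that do not come from the patch itself:

public class BufferVsBuilder {
    // Before: StringBuffer acquires a lock on every append(), wasted on a local buffer.
    static String joinWithBuffer(String[] parts) {
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < parts.length; i++) {
            if (i > 0) sb.append(' ');
            sb.append(parts[i]);
        }
        return sb.toString();
    }

    // After: identical logic and result, but no monitor acquisition per call.
    static String joinWithBuilder(String[] parts) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < parts.length; i++) {
            if (i > 0) sb.append(' ');
            sb.append(parts[i]);
        }
        return sb.toString();
    }
}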


@ -56,7 +56,7 @@ public class Hyphen implements Serializable {
&& preBreak.equals("-")) {
return "-";
}
StringBuffer res = new StringBuffer("{");
StringBuilder res = new StringBuilder("{");
res.append(preBreak);
res.append("}{");
res.append(postBreak);


@ -91,7 +91,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
}
protected String unpackValues(int k) {
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
byte v = vspace.get(k++);
while (v != 0) {
char c = (char) ((v >>> 4) - 1 + '0');
@ -169,7 +169,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
}
protected byte[] getValues(int k) {
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
byte v = vspace.get(k++);
while (v != 0) {
char c = (char) ((v >>> 4) - 1);


@ -51,7 +51,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
PatternConsumer consumer;
StringBuffer token;
StringBuilder token;
ArrayList exception;
@ -68,7 +68,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
static final int ELEM_HYPHEN = 4;
public PatternParser() throws HyphenationException {
token = new StringBuffer();
token = new StringBuilder();
parser = createParser();
parser.setContentHandler(this);
parser.setErrorHandler(this);
@ -191,7 +191,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
}
protected static String getPattern(String word) {
StringBuffer pat = new StringBuffer();
StringBuilder pat = new StringBuilder();
int len = word.length();
for (int i = 0; i < len; i++) {
if (!Character.isDigit(word.charAt(i))) {
@ -207,7 +207,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
Object item = ex.get(i);
if (item instanceof String) {
String str = (String) item;
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
for (int j = 0; j < str.length(); j++) {
char c = str.charAt(j);
if (c != hyphenChar) {
@ -233,7 +233,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
}
protected String getExceptionWord(ArrayList ex) {
StringBuffer res = new StringBuffer();
StringBuilder res = new StringBuilder();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
if (item instanceof String) {
@ -248,7 +248,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
}
protected static String getInterletterValues(String pat) {
StringBuffer il = new StringBuffer();
StringBuilder il = new StringBuilder();
String word = pat + "a"; // add dummy letter to serve as sentinel
int len = word.length();
for (int i = 0; i < len; i++) {
@ -399,7 +399,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
* Returns a string of the location.
*/
private String getLocationString(SAXParseException ex) {
StringBuffer str = new StringBuffer();
StringBuilder str = new StringBuilder();
String systemId = ex.getSystemId();
if (systemId != null) {


@ -495,14 +495,14 @@ public class TernaryTree implements Cloneable, Serializable {
Stack ns;
/**
* key stack implemented with a StringBuffer
* key stack implemented with a StringBuilder
*/
StringBuffer ks;
StringBuilder ks;
public Iterator() {
cur = -1;
ns = new Stack();
ks = new StringBuffer();
ks = new StringBuilder();
rewind();
}
@ -619,7 +619,7 @@ public class TernaryTree implements Cloneable, Serializable {
}
// The current node should be a data node and
// the key should be in the key stack (at least partially)
StringBuffer buf = new StringBuffer(ks.toString());
StringBuilder buf = new StringBuilder(ks.toString());
if (sc[cur] == 0xFFFF) {
int p = lo[cur];
while (kv.get(p) != 0) {


@ -33,7 +33,7 @@ public class GermanStemmer
/**
* Buffer for the terms while stemming them.
*/
private StringBuffer sb = new StringBuffer();
private StringBuilder sb = new StringBuilder();
/**
* Amount of characters that are removed with <tt>substitute()</tt> while stemming.
@ -52,7 +52,7 @@ public class GermanStemmer
term = term.toLowerCase();
if ( !isStemmable( term ) )
return term;
// Reset the StringBuffer.
// Reset the StringBuilder.
sb.delete( 0, sb.length() );
sb.insert( 0, term );
// Stemming starts here...
@ -86,7 +86,7 @@ public class GermanStemmer
* discriminators in the most of those cases.
* The algorithm is context free, except of the length restrictions.
*/
private void strip( StringBuffer buffer )
private void strip( StringBuilder buffer )
{
boolean doMore = true;
while ( doMore && buffer.length() > 3 ) {
@ -126,7 +126,7 @@ public class GermanStemmer
* Does some optimizations on the term. This optimisations are
* contextual.
*/
private void optimize( StringBuffer buffer )
private void optimize( StringBuilder buffer )
{
// Additional step for female plurals of professions and inhabitants.
if ( buffer.length() > 5 && buffer.substring( buffer.length() - 5, buffer.length() ).equals( "erin*" ) ) {
@ -142,7 +142,7 @@ public class GermanStemmer
/**
* Removes a particle denotion ("ge") from a term.
*/
private void removeParticleDenotion( StringBuffer buffer )
private void removeParticleDenotion( StringBuilder buffer )
{
if ( buffer.length() > 4 ) {
for ( int c = 0; c < buffer.length() - 3; c++ ) {
@ -164,7 +164,7 @@ public class GermanStemmer
* - Substitute some common character combinations with a token:
* sch/ch/ei/ie/ig/st -> $/§/%/&/#/!
*/
private void substitute( StringBuffer buffer )
private void substitute( StringBuilder buffer )
{
substCount = 0;
for ( int c = 0; c < buffer.length(); c++ ) {
@ -232,7 +232,7 @@ public class GermanStemmer
* character combinations. Umlauts will remain as their corresponding vowel,
* as "ß" remains as "ss".
*/
private void resubstitute( StringBuffer buffer )
private void resubstitute( StringBuilder buffer )
{
for ( int c = 0; c < buffer.length(); c++ ) {
if ( buffer.charAt( c ) == '*' ) {


@ -32,12 +32,12 @@ public class FrenchStemmer {
/**
* Buffer for the terms while stemming them.
*/
private StringBuffer sb = new StringBuffer();
private StringBuilder sb = new StringBuilder();
/**
* A temporary buffer, used to reconstruct R2
*/
private StringBuffer tb = new StringBuffer();
private StringBuilder tb = new StringBuilder();
/**
* Region R0 is equal to the whole buffer
@ -92,7 +92,7 @@ public class FrenchStemmer {
// Use lowercase for medium stemming.
term = term.toLowerCase();
// Reset the StringBuffer.
// Reset the StringBuilder.
sb.delete( 0, sb.length() );
sb.insert( 0, term );
@ -207,7 +207,7 @@ public class FrenchStemmer {
* or changed were done in the amment, emment, ments or ment suffixes<br>
* refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
*
* @return boolean - true if something changed in the StringBuffer
* @return boolean - true if something changed in the StringBuilder
*/
private boolean step2a() {
String[] search = { "îmes", "îtes", "iraIent", "irait", "irais", "irai", "iras", "ira",
@ -553,10 +553,10 @@ public class FrenchStemmer {
* Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br>
* "R is the region after the first non-vowel following a vowel
* or is the null region at the end of the word if there is no such non-vowel"<br>
* @param buffer java.lang.StringBuffer - the in buffer
* @param buffer java.lang.StringBuilder - the in buffer
* @return java.lang.String - the resulting string
*/
private String retrieveR( StringBuffer buffer ) {
private String retrieveR( StringBuilder buffer ) {
int len = buffer.length();
int pos = -1;
for (int c = 0; c < len; c++) {
@ -590,10 +590,10 @@ public class FrenchStemmer {
* "If the word begins with two vowels, RV is the region after the third letter,
* otherwise the region after the first vowel not at the beginning of the word,
* or the end of the word if these positions cannot be found."<br>
* @param buffer java.lang.StringBuffer - the in buffer
* @param buffer java.lang.StringBuilder - the in buffer
* @return java.lang.String - the resulting string
*/
private String retrieveRV( StringBuffer buffer ) {
private String retrieveRV( StringBuilder buffer ) {
int len = buffer.length();
if ( buffer.length() > 3)
{
@ -627,10 +627,10 @@ public class FrenchStemmer {
* Turns y preceded OR followed by a vowel to UpperCase<br>
* Turns u preceded by q to UpperCase<br>
*
* @param buffer java.util.StringBuffer - the buffer to treat
* @return java.util.StringBuffer - the treated buffer
* @param buffer java.util.StringBuilder - the buffer to treat
* @return java.util.StringBuilder - the treated buffer
*/
private StringBuffer treatVowels( StringBuffer buffer ) {
private StringBuilder treatVowels( StringBuilder buffer ) {
for ( int c = 0; c < buffer.length(); c++ ) {
char ch = buffer.charAt( c );


@ -32,7 +32,7 @@ public class DutchStemmer {
/**
* Buffer for the terms while stemming them.
*/
private StringBuffer sb = new StringBuffer();
private StringBuilder sb = new StringBuilder();
private boolean _removedE;
private Map _stemDict;
@ -56,7 +56,7 @@ public class DutchStemmer {
else
return null;
// Reset the StringBuffer.
// Reset the StringBuilder.
sb.delete(0, sb.length());
sb.insert(0, term);
// Stemming starts here...
@ -74,7 +74,7 @@ public class DutchStemmer {
return sb.toString();
}
private boolean enEnding(StringBuffer sb) {
private boolean enEnding(StringBuilder sb) {
String[] enend = new String[]{"ene", "en"};
for (int i = 0; i < enend.length; i++) {
String end = enend[i];
@ -93,7 +93,7 @@ public class DutchStemmer {
}
private void step1(StringBuffer sb) {
private void step1(StringBuilder sb) {
if (_R1 >= sb.length())
return;
@ -129,7 +129,7 @@ public class DutchStemmer {
*
* @param sb String being stemmed
*/
private void step2(StringBuffer sb) {
private void step2(StringBuilder sb) {
_removedE = false;
if (_R1 >= sb.length())
return;
@ -149,7 +149,7 @@ public class DutchStemmer {
*
* @param sb String being stemmed
*/
private void step3a(StringBuffer sb) {
private void step3a(StringBuilder sb) {
if (_R2 >= sb.length())
return;
String s = sb.toString();
@ -174,7 +174,7 @@ public class DutchStemmer {
*
* @param sb String being stemmed
*/
private void step3b(StringBuffer sb) {
private void step3b(StringBuilder sb) {
if (_R2 >= sb.length())
return;
String s = sb.toString();
@ -229,7 +229,7 @@ public class DutchStemmer {
*
* @param sb String being stemmed
*/
private void step4(StringBuffer sb) {
private void step4(StringBuilder sb) {
if (sb.length() < 4)
return;
String end = sb.substring(sb.length() - 4, sb.length());
@ -262,7 +262,7 @@ public class DutchStemmer {
/**
* Substitute ä, ë, ï, ö, ü, á , é, í, ó, ú
*/
private void substitute(StringBuffer buffer) {
private void substitute(StringBuilder buffer) {
for (int i = 0; i < buffer.length(); i++) {
switch (buffer.charAt(i)) {
case 'ä':
@ -299,22 +299,22 @@ public class DutchStemmer {
}
}
/*private boolean isValidSEnding(StringBuffer sb) {
/*private boolean isValidSEnding(StringBuilder sb) {
return isValidSEnding(sb, sb.length() - 1);
}*/
private boolean isValidSEnding(StringBuffer sb, int index) {
private boolean isValidSEnding(StringBuilder sb, int index) {
char c = sb.charAt(index);
if (isVowel(c) || c == 'j')
return false;
return true;
}
/*private boolean isValidEnEnding(StringBuffer sb) {
/*private boolean isValidEnEnding(StringBuilder sb) {
return isValidEnEnding(sb, sb.length() - 1);
}*/
private boolean isValidEnEnding(StringBuffer sb, int index) {
private boolean isValidEnEnding(StringBuilder sb, int index) {
char c = sb.charAt(index);
if (isVowel(c))
return false;
@ -326,18 +326,18 @@ public class DutchStemmer {
return true;
}
private void unDouble(StringBuffer sb) {
private void unDouble(StringBuilder sb) {
unDouble(sb, sb.length());
}
private void unDouble(StringBuffer sb, int endIndex) {
private void unDouble(StringBuilder sb, int endIndex) {
String s = sb.substring(0, endIndex);
if (s.endsWith("kk") || s.endsWith("tt") || s.endsWith("dd") || s.endsWith("nn") || s.endsWith("mm") || s.endsWith("ff")) {
sb.delete(endIndex - 1, endIndex);
}
}
private int getRIndex(StringBuffer sb, int start) {
private int getRIndex(StringBuilder sb, int start) {
if (start == 0)
start = 1;
int i = start;
@ -350,7 +350,7 @@ public class DutchStemmer {
return i + 1;
}
private void storeYandI(StringBuffer sb) {
private void storeYandI(StringBuilder sb) {
if (sb.charAt(0) == 'y')
sb.setCharAt(0, 'Y');
@ -378,7 +378,7 @@ public class DutchStemmer {
sb.setCharAt(last, 'Y');
}
private void reStoreYandI(StringBuffer sb) {
private void reStoreYandI(StringBuilder sb) {
String tmp = sb.toString();
sb.delete(0, sb.length());
sb.insert(0, tmp.replaceAll("I", "i").replaceAll("Y", "y"));


@ -255,9 +255,9 @@ class RussianStemmer
* Adjectival ending is an adjective ending,
* optionally preceded by participle ending.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean adjectival(StringBuffer stemmingZone)
private boolean adjectival(StringBuilder stemmingZone)
{
// look for adjective ending in a stemming zone
if (!findAndRemoveEnding(stemmingZone, adjectiveEndings))
@ -275,9 +275,9 @@ class RussianStemmer
/**
* Derivational endings
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean derivational(StringBuffer stemmingZone)
private boolean derivational(StringBuilder stemmingZone)
{
int endingLength = findEnding(stemmingZone, derivationalEndings);
if (endingLength == 0)
@ -302,7 +302,7 @@ class RussianStemmer
* Finds ending among given ending class and returns the length of ending found(0, if not found).
* Creation date: (17/03/2002 8:18:34 PM)
*/
private int findEnding(StringBuffer stemmingZone, int startIndex, char[][] theEndingClass)
private int findEnding(StringBuilder stemmingZone, int startIndex, char[][] theEndingClass)
{
boolean match = false;
for (int i = theEndingClass.length - 1; i >= 0; i--)
@ -333,7 +333,7 @@ class RussianStemmer
return 0;
}
private int findEnding(StringBuffer stemmingZone, char[][] theEndingClass)
private int findEnding(StringBuilder stemmingZone, char[][] theEndingClass)
{
return findEnding(stemmingZone, stemmingZone.length() - 1, theEndingClass);
}
@ -342,7 +342,7 @@ class RussianStemmer
* Finds the ending among the given class of endings and removes it from stemming zone.
* Creation date: (17/03/2002 8:18:34 PM)
*/
private boolean findAndRemoveEnding(StringBuffer stemmingZone, char[][] theEndingClass)
private boolean findAndRemoveEnding(StringBuilder stemmingZone, char[][] theEndingClass)
{
int endingLength = findEnding(stemmingZone, theEndingClass);
if (endingLength == 0)
@ -360,7 +360,7 @@ class RussianStemmer
* preceded by any of given predecessors, and if so, removes it from stemming zone.
* Creation date: (17/03/2002 8:18:34 PM)
*/
private boolean findAndRemoveEnding(StringBuffer stemmingZone,
private boolean findAndRemoveEnding(StringBuilder stemmingZone,
char[][] theEndingClass, char[][] thePredessors)
{
int endingLength = findEnding(stemmingZone, theEndingClass);
@ -445,9 +445,9 @@ class RussianStemmer
/**
* Noun endings.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean noun(StringBuffer stemmingZone)
private boolean noun(StringBuilder stemmingZone)
{
return findAndRemoveEnding(stemmingZone, nounEndings);
}
@ -455,9 +455,9 @@ class RussianStemmer
/**
* Perfective gerund endings.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean perfectiveGerund(StringBuffer stemmingZone)
private boolean perfectiveGerund(StringBuilder stemmingZone)
{
return findAndRemoveEnding(
stemmingZone,
@ -469,9 +469,9 @@ class RussianStemmer
/**
* Reflexive endings.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean reflexive(StringBuffer stemmingZone)
private boolean reflexive(StringBuilder stemmingZone)
{
return findAndRemoveEnding(stemmingZone, reflexiveEndings);
}
@ -479,9 +479,9 @@ class RussianStemmer
/**
* Insert the method's description here.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean removeI(StringBuffer stemmingZone)
private boolean removeI(StringBuilder stemmingZone)
{
if (stemmingZone.length() > 0
&& stemmingZone.charAt(stemmingZone.length() - 1) == I)
@ -498,9 +498,9 @@ class RussianStemmer
/**
* Insert the method's description here.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean removeSoft(StringBuffer stemmingZone)
private boolean removeSoft(StringBuilder stemmingZone)
{
if (stemmingZone.length() > 0
&& stemmingZone.charAt(stemmingZone.length() - 1) == SOFT)
@ -525,7 +525,7 @@ class RussianStemmer
markPositions(input);
if (RV == 0)
return input; //RV wasn't detected, nothing to stem
StringBuffer stemmingZone = new StringBuffer(input.substring(RV));
StringBuilder stemmingZone = new StringBuilder(input.substring(RV));
// stemming goes on in RV
// Step 1
@ -555,9 +555,9 @@ class RussianStemmer
/**
* Superlative endings.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean superlative(StringBuffer stemmingZone)
private boolean superlative(StringBuilder stemmingZone)
{
return findAndRemoveEnding(stemmingZone, superlativeEndings);
}
@ -565,9 +565,9 @@ class RussianStemmer
/**
* Undoubles N.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean undoubleN(StringBuffer stemmingZone)
private boolean undoubleN(StringBuilder stemmingZone)
{
char[][] doubleN = {
{ N, N }
@ -586,9 +586,9 @@ class RussianStemmer
/**
* Verb endings.
* Creation date: (17/03/2002 12:14:58 AM)
* @param stemmingZone java.lang.StringBuffer
* @param stemmingZone java.lang.StringBuilder
*/
private boolean verb(StringBuffer stemmingZone)
private boolean verb(StringBuilder stemmingZone)
{
return findAndRemoveEnding(
stemmingZone,


@ -44,7 +44,7 @@ import org.apache.lucene.util.AttributeSource;
public class ShingleFilter extends TokenFilter {
private LinkedList shingleBuf = new LinkedList();
private StringBuffer[] shingles;
private StringBuilder[] shingles;
private String tokenType = "shingle";
/**
@ -140,15 +140,15 @@ public class ShingleFilter extends TokenFilter {
if (maxShingleSize < 2) {
throw new IllegalArgumentException("Max shingle size must be >= 2");
}
shingles = new StringBuffer[maxShingleSize];
shingles = new StringBuilder[maxShingleSize];
for (int i = 0; i < shingles.length; i++) {
shingles[i] = new StringBuffer();
shingles[i] = new StringBuilder();
}
this.maxShingleSize = maxShingleSize;
}
/**
* Clear the StringBuffers that are used for storing the output shingles.
* Clear the StringBuilders that are used for storing the output shingles.
*/
private void clearShingles() {
for (int i = 0; i < shingles.length; i++) {
@ -188,7 +188,7 @@ public class ShingleFilter extends TokenFilter {
restoreState(nextToken);
typeAtt.setType(tokenType);
offsetAtt.setOffset(offsetAtt.startOffset(), endOffsets[shingleBufferPosition]);
StringBuffer buf = shingles[shingleBufferPosition];
StringBuilder buf = shingles[shingleBufferPosition];
int termLength = buf.length();
char[] termBuffer = termAtt.termBuffer();
if (termBuffer.length < termLength)


@ -451,7 +451,7 @@ public class ShingleMatrixFilter extends TokenStream {
}
// shingle token factory
StringBuffer sb = new StringBuffer(termLength + 10); // paranormal ability to foresee the future.
StringBuilder sb = new StringBuilder(termLength + 10); // paranormal ability to foresee the future.
for (Iterator iterator = shingle.iterator(); iterator.hasNext();) {
Token shingleToken = (Token) iterator.next();
if (spacerCharacter != null && sb.length() > 0) {


@ -44,7 +44,7 @@ public final class SentenceTokenizer extends Tokenizer {
*/
private final static String PUNCTION = "。,!?;,!?;";
private final StringBuffer buffer = new StringBuffer();
private final StringBuilder buffer = new StringBuilder();
private int tokenStart = 0, tokenEnd = 0;


@ -225,7 +225,7 @@ class BiSegGraph {
}
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
Collection values = tokenPairListTable.values();
for (Iterator iter1 = values.iterator(); iter1.hasNext();) {
List segList = (List) iter1.next();


@ -47,7 +47,7 @@ public class HHMMSegmenter {
int length = sentence.length();
int foundIndex;
int[] charTypeArray = getCharTypes(sentence);
StringBuffer wordBuf = new StringBuffer();
StringBuilder wordBuf = new StringBuilder();
SegToken token;
int frequency = 0; // number of occurrences of the word
boolean hasFullWidth;


@ -141,7 +141,7 @@ class SegGraph {
public String toString() {
List tokenList = this.toTokenList();
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (Iterator iter = tokenList.iterator(); iter.hasNext();) {
SegToken t = (SegToken) iter.next();
sb.append(t + "\n");


@ -275,7 +275,7 @@ public class HtmlDocument {
*/
private String getBodyText(Node node) {
NodeList nl = node.getChildNodes();
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
for (int i = 0; i < nl.getLength(); i++) {
Node child = nl.item(i);
switch (child.getNodeType()) {


@ -292,7 +292,7 @@ public class CollationTestBase extends TestCase {
private void assertMatches(Searcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
ScoreDoc[] result = searcher.search(query, null, 1000, sort).scoreDocs;
StringBuffer buff = new StringBuffer(10);
StringBuilder buff = new StringBuilder(10);
int n = result.length;
for (int i = 0 ; i < n ; ++i) {
Document doc = searcher.doc(result[i].doc);


@ -123,7 +123,7 @@ public class GradientFormatter implements Formatter
{
return originalText;
}
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append("<font ");
if (highlightForeground)
{
@ -148,7 +148,7 @@ public class GradientFormatter implements Formatter
int rVal = getColorVal(fgRMin, fgRMax, score);
int gVal = getColorVal(fgGMin, fgGMax, score);
int bVal = getColorVal(fgBMin, fgBMax, score);
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append("#");
sb.append(intToHex(rVal));
sb.append(intToHex(gVal));
@ -161,7 +161,7 @@ public class GradientFormatter implements Formatter
int rVal = getColorVal(bgRMin, bgRMax, score);
int gVal = getColorVal(bgGMin, bgGMax, score);
int bVal = getColorVal(bgBMin, bgBMax, score);
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append("#");
sb.append(intToHex(rVal));
sb.append(intToHex(gVal));


@ -215,7 +215,7 @@ public class Highlighter
throws IOException, InvalidTokenOffsetsException
{
ArrayList docFrags = new ArrayList();
StringBuffer newText=new StringBuffer();
StringBuilder newText=new StringBuilder();
TermAttribute termAtt = tokenStream.addAttribute(TermAttribute.class);
OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
@ -486,7 +486,7 @@ public class Highlighter
throws IOException, InvalidTokenOffsetsException
{
String sections[] = getBestFragments(tokenStream,text, maxNumFragments);
StringBuffer result = new StringBuffer();
StringBuilder result = new StringBuilder();
for (int i = 0; i < sections.length; i++)
{
if (i > 0)


@ -40,7 +40,7 @@ public class SimpleHTMLEncoder implements Encoder
return "";
}
StringBuffer result = new StringBuffer(plainText.length());
StringBuilder result = new StringBuilder(plainText.length());
for (int index=0; index<plainText.length(); index++)
{


@ -47,9 +47,9 @@ public class SimpleHTMLFormatter implements Formatter {
return originalText;
}
// Allocate StringBuffer with the right number of characters from the
// Allocate StringBuilder with the right number of characters from the
// beginning, to avoid char[] allocations in the middle of appends.
StringBuffer returnBuffer = new StringBuffer(preTag.length() + originalText.length() + postTag.length());
StringBuilder returnBuffer = new StringBuilder(preTag.length() + originalText.length() + postTag.length());
returnBuffer.append(preTag);
returnBuffer.append(originalText);
returnBuffer.append(postTag);


@ -50,7 +50,7 @@ public class SpanGradientFormatter
}
// try to size sb correctly
StringBuffer sb = new StringBuffer( originalText.length() + EXTRA);
StringBuilder sb = new StringBuilder( originalText.length() + EXTRA);
sb.append("<span style=\"");
if (highlightForeground)
@ -71,7 +71,7 @@ public class SpanGradientFormatter
return sb.toString();
}
// guess how much extra text we'll add to the text we're highlighting to try to avoid a StringBuffer resize
// guess how much extra text we'll add to the text we're highlighting to try to avoid a StringBuilder resize
private static final String TEMPLATE = "<span style=\"background: #EEEEEE; color: #000000;\">...</span>";
private static final int EXTRA = TEMPLATE.length();
}


@ -25,12 +25,19 @@ package org.apache.lucene.search.highlight;
*/
public class TextFragment
{
StringBuffer markedUpText;
CharSequence markedUpText;
int fragNum;
int textStartPos;
int textEndPos;
float score;
public TextFragment(CharSequence markedUpText,int textStartPos, int fragNum)
{
this.markedUpText=markedUpText;
this.textStartPos = textStartPos;
this.fragNum = fragNum;
}
/** @deprecated */
public TextFragment(StringBuffer markedUpText,int textStartPos, int fragNum)
{
this.markedUpText=markedUpText;
@ -73,7 +80,7 @@ public class TextFragment
/* Returns the marked-up text for this text fragment
*/
public String toString() {
return markedUpText.substring(textStartPos, textEndPos);
return markedUpText.subSequence(textStartPos, textEndPos).toString();
}
}
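The TextFragment hunk above is one of the few places where the patch adjusts a constructor signature rather than just an internal buffer: the field becomes a CharSequence, a CharSequence constructor is added, the old StringBuffer constructor is kept but deprecated, and toString() switches to subSequence(). A hypothetical caller sketch under those assumptions (the fragment text and variable names are made up for illustration):

import org.apache.lucene.search.highlight.TextFragment;

public class TextFragmentCompat {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        // Both StringBuilder and StringBuffer implement CharSequence, so the new
        // constructor accepts either; the StringBuffer overload survives only so
        // that existing callers keep compiling.
        CharSequence highlighted = new StringBuilder("a <B>quick</B> brown fox");
        TextFragment viaCharSequence = new TextFragment(highlighted, 0, 0);

        StringBuffer legacy = new StringBuffer("a <B>quick</B> brown fox");
        TextFragment viaStringBuffer = new TextFragment(legacy, 0, 0); // deprecated path
    }
}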


@ -1034,7 +1034,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
TermQuery query = new TermQuery(new Term("data", goodWord));
String match = null;
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append(goodWord);
for (int i = 0; i < 10000; i++) {
sb.append(" ");


@ -58,7 +58,7 @@ public class InstantiatedTermFreqVector
}
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append('{');
sb.append(field).append(": ");
if (terms != null) {


@ -507,7 +507,7 @@ public class MemoryIndex implements Serializable {
* @return the string representation
*/
public String toString() {
StringBuffer result = new StringBuffer(256);
StringBuilder result = new StringBuilder(256);
sortFields();
int sumChars = 0;
int sumPositions = 0;
@ -688,7 +688,7 @@ public class MemoryIndex implements Serializable {
public String toString(int stride) {
int s = size() / stride;
int len = Math.min(10, s); // avoid printing huge lists
StringBuffer buf = new StringBuffer(4*len);
StringBuilder buf = new StringBuilder(4*len);
buf.append("[");
for (int i = 0; i < len; i++) {
buf.append(get(i*stride));


@ -122,7 +122,7 @@ public class SynonymMap {
* @return a String representation
*/
public String toString() {
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
Iterator iter = new TreeMap(table).keySet().iterator();
int count = 0;
int f0 = 0;
@ -327,7 +327,7 @@ public class SynonymMap {
}
// assemble large string containing all words
StringBuffer buf = new StringBuffer(len);
StringBuilder buf = new StringBuilder(len);
for (int j=0; j < size; j++) buf.append(words[j]);
String allWords = new String(buf.toString()); // ensure compact string across JDK versions
buf = null;


@ -201,7 +201,7 @@ public class ChainedFilter extends Filter
public String toString()
{
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append("ChainedFilter: [");
for (int i = 0; i < chain.length; i++)
{


@ -80,7 +80,7 @@ public class AnalyzingQueryParser extends org.apache.lucene.queryParser.QueryPar
/* somewhat a hack: find/store wildcard chars
* in order to put them back after analyzing */
boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*"));
StringBuffer tmpBuffer = new StringBuffer();
StringBuilder tmpBuffer = new StringBuilder();
char[] chars = termStr.toCharArray();
for (int i = 0; i < termStr.length(); i++) {
if (chars[i] == '?' || chars[i] == '*') {
@ -154,7 +154,7 @@ public class AnalyzingQueryParser extends org.apache.lucene.queryParser.QueryPar
} else {
/* the term was tokenized, let's rebuild to one token
* with wildcards put back in postion */
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < tlist.size(); i++) {
sb.append((String) tlist.get(i));
if (wlist != null && wlist.size() > i) {


@ -182,7 +182,7 @@ public class BooleanFilter extends Filter
/** Prints a user-readable version of this query. */
public String toString()
{
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("BooleanFilter(");
appendFilters(shouldFilters, "", buffer);
appendFilters(mustFilters, "+", buffer);
@ -191,7 +191,7 @@ public class BooleanFilter extends Filter
return buffer.toString();
}
private void appendFilters(ArrayList filters, String occurString, StringBuffer buffer)
private void appendFilters(ArrayList filters, String occurString, StringBuilder buffer)
{
if (filters != null) {
for (int i = 0; i < filters.size(); i++) {


@ -681,7 +681,7 @@ public final class MoreLikeThis {
* Describe the parameters that control how the "more like this" query is formed.
*/
public String describeParams() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append("\t" + "maxQueryTerms : " + maxQueryTerms + "\n");
sb.append("\t" + "minWordLen : " + minWordLen + "\n");
sb.append("\t" + "maxWordLen : " + maxWordLen + "\n");


@ -171,7 +171,7 @@ public class PathQueryNode extends QueryNodeImpl {
}
private CharSequence getPathString() {
StringBuffer path = new StringBuffer();
StringBuilder path = new StringBuilder();
for (QueryText pathelement : values) {
path.append("/").append(pathelement.value);
@ -180,7 +180,7 @@ public class PathQueryNode extends QueryNodeImpl {
}
public CharSequence toQueryString(EscapeQuerySyntax escaper) {
StringBuffer path = new StringBuffer();
StringBuilder path = new StringBuilder();
path.append("/").append(getFirstPathElement());
for (QueryText pathelement : getPathElements(1)) {


@ -96,7 +96,7 @@ public final class UnescapedCharSequence implements CharSequence {
*/
public String toStringEscaped() {
// non efficient implementation
StringBuffer result = new StringBuffer();
StringBuilder result = new StringBuilder();
for (int i = 0; i >= this.length(); i++) {
if (this.chars[i] == '\\') {
result.append('\\');
@ -117,7 +117,7 @@ public final class UnescapedCharSequence implements CharSequence {
*/
public String toStringEscaped(char[] enabledChars) {
// TODO: non efficient implementation, refactor this code
StringBuffer result = new StringBuffer();
StringBuilder result = new StringBuilder();
for (int i = 0; i < this.length(); i++) {
if (this.chars[i] == '\\') {
result.append('\\');


@ -188,7 +188,7 @@ final public class QueryParserUtil {
* escaped are escaped by a preceding <code>\</code>.
*/
public static String escape(String s) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
// These characters are part of the query syntax and must be escaped


@ -95,7 +95,7 @@ public class QueryParserWrapper {
* escaped are escaped by a preceding <code>\</code>.
*/
public static String escape(String s) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
// These characters are part of the query syntax and must be escaped


@ -107,7 +107,7 @@ public class SpanRegexQuery extends SpanQuery implements RegexQueryCapable {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("spanRegexQuery(");
buffer.append(term);
buffer.append(")");


@ -146,7 +146,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
}
public String getRandomNumberString(int num, int low, int high) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < num; i++) {
sb.append(getRandomNumber(low, high));
}
@ -158,7 +158,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
}
public String getRandomCharString(int num, int start, int end) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < num; i++) {
sb.append(new Character((char) getRandomNumber(start, end)));
}
@ -394,7 +394,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
TopDocs hits = searcher.search (query, null, expectedResult.length(), sort);
ScoreDoc[] result = hits.scoreDocs;
assertEquals(hits.totalHits, expectedResult.length());
StringBuffer buff = new StringBuffer(10);
StringBuilder buff = new StringBuilder(10);
int n = result.length;
for (int i=0; i<n; ++i) {
Document doc = searcher.doc(result[i].doc);


@ -236,7 +236,7 @@ public class WikipediaTokenizer extends Tokenizer {
private void collapseAndSaveTokens(int tokenType, String type) throws IOException {
//collapse
StringBuffer buffer = new StringBuffer(32);
StringBuilder buffer = new StringBuilder(32);
int numAdded = scanner.setText(buffer);
//TODO: how to know how much whitespace to add
int theStart = scanner.yychar();
@ -279,7 +279,7 @@ public class WikipediaTokenizer extends Tokenizer {
private void collapseTokens(int tokenType) throws IOException {
//collapse
StringBuffer buffer = new StringBuffer(32);
StringBuilder buffer = new StringBuilder(32);
int numAdded = scanner.setText(buffer);
//TODO: how to know how much whitespace to add
int theStart = scanner.yychar();


@ -480,7 +480,7 @@ final void getText(TermAttribute t) {
t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}
final int setText(StringBuffer buffer){
final int setText(StringBuilder buffer){
int length = zzMarkedPos - zzStartRead;
buffer.append(zzBuffer, zzStartRead, length);
return length;


@ -85,7 +85,7 @@ final void getText(Token t) {
t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}
final int setText(StringBuffer buffer){
final int setText(StringBuilder buffer){
int length = zzMarkedPos - zzStartRead;
buffer.append(zzBuffer, zzStartRead, length);
return length;


@ -197,7 +197,7 @@ public class DOMUtils
//MH changed to Node from Element 25/11/2005
public static String getText(Node e)
{
StringBuffer sb=new StringBuffer();
StringBuilder sb=new StringBuilder();
getTextBuffer(e, sb);
return sb.toString();
}
@ -215,7 +215,7 @@ public class DOMUtils
return null;
}
private static void getTextBuffer(Node e, StringBuffer sb)
private static void getTextBuffer(Node e, StringBuilder sb)
{
for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling())
{


@ -80,7 +80,7 @@ public abstract class BaseCharFilter extends CharFilter {
}
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append('(');
sb.append(off);
sb.append(',');


@ -235,7 +235,7 @@ public final class NumericTokenStream extends TokenStream {
@Override
public String toString() {
final StringBuffer sb = new StringBuffer("(numeric,valSize=").append(valSize);
final StringBuilder sb = new StringBuilder("(numeric,valSize=").append(valSize);
sb.append(",precisionStep=").append(precisionStep).append(')');
return sb.toString();
}


@ -607,7 +607,7 @@ public class Token extends AttributeImpl
}
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append('(');
initTermBuffer();
if (termBuffer == null)


@ -305,7 +305,7 @@ public abstract class AbstractField implements Fieldable {
/** Prints a Field for human consumption. */
public final String toString() {
StringBuffer result = new StringBuffer();
StringBuilder result = new StringBuilder();
if (isStored) {
result.append("stored");
if (isCompressed)


@ -102,7 +102,7 @@ public class DateField {
// Pad with leading zeros
if (s.length() < DATE_LEN) {
StringBuffer sb = new StringBuffer(s);
StringBuilder sb = new StringBuilder(s);
while (sb.length() < DATE_LEN)
sb.insert(0, 0);
s = sb.toString();


@ -317,7 +317,7 @@ public final class Document implements java.io.Serializable {
/** Prints the fields of a document for human consumption. */
public final String toString() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("Document<");
for (int i = 0; i < fields.size(); i++) {
Fieldable field = (Fieldable)fields.get(i);


@ -81,7 +81,7 @@ public class NumberTools {
return MIN_STRING_VALUE;
}
StringBuffer buf = new StringBuffer(STR_SIZE);
StringBuilder buf = new StringBuilder(STR_SIZE);
if (l < 0) {
buf.append(NEGATIVE_PREFIX);


@ -5220,7 +5220,7 @@ public class IndexWriter {
}
private synchronized String segString(SegmentInfos infos) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
final int count = infos.size();
for(int i = 0; i < count; i++) {
if (i > 0) {


@ -123,7 +123,7 @@ public abstract class MergePolicy {
}
String segString(Directory dir) {
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
final int numSegments = segments.size();
for(int i=0;i<numSegments;i++) {
if (i > 0) b.append(' ');
@ -159,7 +159,7 @@ public abstract class MergePolicy {
}
public String segString(Directory dir) {
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
b.append("MergeSpec:\n");
final int count = merges.size();
for(int i=0;i<count;i++)


@ -895,7 +895,7 @@ public final class SegmentInfos extends Vector {
}
public synchronized String segString(Directory directory) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
final int count = size();
for(int i = 0; i < count; i++) {
if (i > 0) {


@ -40,7 +40,7 @@ class SegmentTermVector implements TermFreqVector {
}
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append('{');
sb.append(field).append(": ");
if(terms != null){


@ -1131,7 +1131,7 @@ public class QueryParser implements QueryParserConstants {
* expects to be escaped are escaped by a preceding <code>\</code>.
*/
public static String escape(String s) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
// These characters are part of the query syntax and must be escaped


@ -1155,7 +1155,7 @@ public class QueryParser {
* expects to be escaped are escaped by a preceding <code>\</code>.
*/
public static String escape(String s) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
// These characters are part of the query syntax and must be escaped


@ -464,7 +464,7 @@ public class BooleanQuery extends Query {
/** Prints a user-readable version of this query. */
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
boolean needParens=(getBoost() != 1.0) || (getMinimumNumberShouldMatch()>0) ;
if (needParens) {
buffer.append("(");


@ -357,7 +357,7 @@ final class BooleanScorer extends Scorer {
}
public String toString() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("boolean(");
for (SubScorer sub = scorers; sub != null; sub = sub.next) {
buffer.append(sub.scorer.toString());


@ -224,7 +224,7 @@ public class DisjunctionMaxQuery extends Query {
* @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
*/
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("(");
int numDisjunctions = disjuncts.size();
for (int i = 0 ; i < numDisjunctions; i++) {


@ -86,7 +86,7 @@ public class Explanation implements java.io.Serializable {
return toString(0);
}
protected String toString(int depth) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
for (int i = 0; i < depth; i++) {
buffer.append(" ");
}
@ -106,7 +106,7 @@ public class Explanation implements java.io.Serializable {
/** Render an explanation as HTML. */
public String toHtml() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("<ul>\n");
buffer.append("<li>");


@ -563,7 +563,7 @@ public interface FieldCache {
public String toString() {
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
b.append("'").append(getReaderKey()).append("'=>");
b.append("'").append(getFieldName()).append("',");
b.append(getCacheType()).append(",").append(getCustom());


@ -431,7 +431,7 @@ public abstract class FieldCacheRangeFilter extends Filter {
}
public final String toString() {
final StringBuffer sb = new StringBuffer(field).append(":");
final StringBuilder sb = new StringBuilder(field).append(":");
return sb.append(includeLower ? '[' : '{')
.append((lowerVal == null) ? "*" : lowerVal.toString())
.append(" TO ")


@ -63,7 +63,7 @@ public class FieldDoc extends ScoreDoc {
public String toString() {
// super.toString returns the doc and score information, so just add the
// fields information
StringBuffer sb = new StringBuffer(super.toString());
StringBuilder sb = new StringBuilder(super.toString());
sb.append("[");
for (int i = 0; i < fields.length; i++) {
sb.append(fields[i]).append(", ");


@ -206,7 +206,7 @@ extends Query {
/** Prints a user-readable version of this query. */
public String toString (String s) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("filtered(");
buffer.append(query.toString(s));
buffer.append(")->");


@ -173,7 +173,7 @@ public class FuzzyQuery extends MultiTermQuery {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!term.field().equals(field)) {
buffer.append(term.field());
buffer.append(":");


@ -113,7 +113,7 @@ public class Hit implements java.io.Serializable {
* Prints the parameters to be used to discover the promised result.
*/
public String toString() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("Hit<");
buffer.append(hits.toString());
buffer.append(" [");


@ -150,7 +150,7 @@ public class MatchAllDocsQuery extends Query {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("*:*");
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();


@ -271,7 +271,7 @@ public class MultiPhraseQuery extends Query {
/** Prints a user-readable version of this query. */
public final String toString(String f) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!field.equals(f)) {
buffer.append(field);
buffer.append(":");


@ -393,7 +393,7 @@ public abstract class MultiTermQuery extends Query {
*/
@Override
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (term != null) {
if (!term.field().equals(field)) {
buffer.append(term.field());


@ -317,7 +317,7 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
@Override
public String toString(final String field) {
final StringBuffer sb = new StringBuffer();
final StringBuilder sb = new StringBuilder();
if (!this.field.equals(field)) sb.append(this.field).append(':');
return sb.append(minInclusive ? '[' : '{')
.append((min == null) ? "*" : min.toString())


@ -167,8 +167,8 @@ public class PhraseQuery extends Query {
Explanation result = new Explanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
StringBuffer docFreqs = new StringBuffer();
StringBuffer query = new StringBuffer();
StringBuilder docFreqs = new StringBuilder();
StringBuilder query = new StringBuilder();
query.append('\"');
docFreqs.append(idfExp.explain());
for (int i = 0; i < terms.size(); i++) {
@ -259,7 +259,7 @@ public class PhraseQuery extends Query {
/** Prints a user-readable version of this query. */
public String toString(String f) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (field != null && !field.equals(f)) {
buffer.append(field);
buffer.append(":");


@ -33,7 +33,7 @@ public class PrefixFilter extends MultiTermQueryWrapperFilter {
/** Prints a user-readable version of this query. */
public String toString () {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("PrefixFilter(");
buffer.append(getPrefix().toString());
buffer.append(")");


@ -47,7 +47,7 @@ public class PrefixQuery extends MultiTermQuery {
/** Prints a user-readable version of this query. */
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!prefix.field().equals(field)) {
buffer.append(prefix.field());
buffer.append(":");


@ -108,7 +108,7 @@ public class QueryTermVector implements TermFreqVector {
}
public final String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
sb.append('{');
for (int i=0; i<terms.length; i++) {
if (i>0) sb.append(", ");


@ -826,7 +826,7 @@ public abstract class Similarity implements Serializable {
}
final int max = searcher.maxDoc();
float idf = 0.0f;
final StringBuffer exp = new StringBuffer();
final StringBuilder exp = new StringBuilder();
Iterator i = terms.iterator();
while (i.hasNext()) {
Term term = (Term)i.next();


@ -229,7 +229,7 @@ implements Serializable {
}
public String toString() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
for (int i = 0; i < fields.length; i++) {
buffer.append(fields[i].toString());


@ -347,7 +347,7 @@ implements Serializable {
}
public String toString() {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
switch (type) {
case SCORE:
buffer.append("<score>");


@ -152,7 +152,7 @@ public class TermQuery extends Query {
/** Prints a user-readable version of this query. */
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!term.field().equals(field)) {
buffer.append(term.field());
buffer.append(":");


@ -137,7 +137,7 @@ public class TermRangeQuery extends MultiTermQuery {
/** Prints a user-readable version of this query. */
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!getField().equals(field)) {
buffer.append(getField());
buffer.append(":");


@ -65,7 +65,7 @@ public class WildcardQuery extends MultiTermQuery {
/** Prints a user-readable version of this query. */
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (!term.field().equals(field)) {
buffer.append(term.field());
buffer.append(":");


@ -118,7 +118,7 @@ public class CustomScoreQuery extends Query {
/* (non-Javadoc) @see org.apache.lucene.search.Query#toString(java.lang.String) */
public String toString(String field) {
StringBuffer sb = new StringBuffer(name()).append("(");
StringBuilder sb = new StringBuilder(name()).append("(");
sb.append(subQuery.toString(field));
for(int i = 0; i < valSrcQueries.length; i++) {
sb.append(", ").append(valSrcQueries[i].toString(field));


@ -85,7 +85,7 @@ public class PayloadNearQuery extends SpanNearQuery {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("payloadNear([");
Iterator i = clauses.iterator();
while (i.hasNext()) {


@ -128,7 +128,7 @@ public class FieldMaskingSpanQuery extends SpanQuery {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("mask(");
buffer.append(maskedQuery.toString(field));
buffer.append(")");


@ -54,7 +54,7 @@ public class SpanFirstQuery extends SpanQuery implements Cloneable {
public Collection getTerms() { return match.getTerms(); }
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("spanFirst(");
buffer.append(match.toString(field));
buffer.append(", ");


@ -104,7 +104,7 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("spanNear([");
Iterator i = clauses.iterator();
while (i.hasNext()) {


@ -58,7 +58,7 @@ public class SpanNotQuery extends SpanQuery implements Cloneable {
public void extractTerms(Set terms) { include.extractTerms(terms); }
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("spanNot(");
buffer.append(include.toString(field));
buffer.append(", ");


@ -112,7 +112,7 @@ public class SpanOrQuery extends SpanQuery implements Cloneable {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
buffer.append("spanOr([");
Iterator i = clauses.iterator();
while (i.hasNext()) {


@ -52,7 +52,7 @@ public class SpanTermQuery extends SpanQuery {
}
public String toString(String field) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
if (term.field().equals(field))
buffer.append(term.text());
else


@ -718,7 +718,7 @@ public class FSDirectory extends Directory {
synchronized (DIGESTER) {
digest = DIGESTER.digest(dirName.getBytes());
}
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
buf.append("lucene-");
for (int i = 0; i < digest.length; i++) {
int b = digest[i];


@ -366,7 +366,7 @@ public final class FieldCacheSanityChecker {
* own line prefaced by a tab character
*/
public String toString() {
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
buf.append(getType()).append(": ");
String m = getMsg();


@ -1872,7 +1872,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
// Construct input text and expected output tokens
List expectedOutputTokens = new ArrayList();
StringBuffer inputText = new StringBuffer();
StringBuilder inputText = new StringBuilder();
for (int n = 0 ; n < foldings.length ; n += 2) {
if (n > 0) {
inputText.append(' '); // Space between tokens
@ -1881,7 +1881,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
// Construct the expected output token: the ASCII string to fold to,
// duplicated as many times as the number of characters in the input text.
StringBuffer expected = new StringBuffer();
StringBuilder expected = new StringBuilder();
int numChars = foldings[n].length();
for (int m = 0 ; m < numChars; ++m) {
expected.append(foldings[n + 1]);


@ -108,7 +108,7 @@ public class TestCharArraySet extends LuceneTestCase {
}
try{
set.add(new StringBuffer(NOT_IN_SET));
set.add(new StringBuilder(NOT_IN_SET));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected


@ -71,7 +71,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
* Test Position increments applied by StopFilter with and without enabling this option.
*/
public void testStopPositons() throws IOException {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
ArrayList a = new ArrayList();
for (int i=0; i<20; i++) {
String w = English.intToEnglish(i).trim();


@ -32,8 +32,8 @@ import java.util.List;
* tests for the TestTeeSinkTokenFilter
*/
public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
protected StringBuffer buffer1;
protected StringBuffer buffer2;
protected StringBuilder buffer1;
protected StringBuilder buffer2;
protected String[] tokens1;
protected String[] tokens2;
@ -46,12 +46,12 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
super.setUp();
tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"};
tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"};
buffer1 = new StringBuffer();
buffer1 = new StringBuilder();
for (int i = 0; i < tokens1.length; i++) {
buffer1.append(tokens1[i]).append(' ');
}
buffer2 = new StringBuffer();
buffer2 = new StringBuilder();
for (int i = 0; i < tokens2.length; i++) {
buffer2.append(tokens2[i]).append(' ');
}
@ -161,7 +161,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
int[] tokCount = {100, 500, 1000, 2000, 5000, 10000};
int[] modCounts = {1, 2, 5, 10, 20, 50, 100, 200, 500};
for (int k = 0; k < tokCount.length; k++) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
System.out.println("-----Tokens: " + tokCount[k] + "-----");
for (int i = 0; i < tokCount[k]; i++) {
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');


@ -30,8 +30,8 @@ import java.util.List;
* tests for the TeeTokenFilter and SinkTokenizer
*/
public class TestTeeTokenFilter extends LuceneTestCase {
protected StringBuffer buffer1;
protected StringBuffer buffer2;
protected StringBuilder buffer1;
protected StringBuilder buffer2;
protected String[] tokens1;
protected String[] tokens2;
@ -44,12 +44,12 @@ public class TestTeeTokenFilter extends LuceneTestCase {
super.setUp();
tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"};
tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"};
buffer1 = new StringBuffer();
buffer1 = new StringBuilder();
for (int i = 0; i < tokens1.length; i++) {
buffer1.append(tokens1[i]).append(' ');
}
buffer2 = new StringBuffer();
buffer2 = new StringBuilder();
for (int i = 0; i < tokens2.length; i++) {
buffer2.append(tokens2[i]).append(' ');
@ -147,7 +147,7 @@ public class TestTeeTokenFilter extends LuceneTestCase {
int[] tokCount = {100, 500, 1000, 2000, 5000, 10000};
int[] modCounts = {1, 2, 5, 10, 20, 50, 100, 200, 500};
for (int k = 0; k < tokCount.length; k++) {
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
System.out.println("-----Tokens: " + tokCount[k] + "-----");
for (int i = 0; i < tokCount[k]; i++) {
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');


@ -71,7 +71,7 @@ public class TestToken extends LuceneTestCase {
public void testGrow() {
Token t = new Token();
StringBuffer buf = new StringBuffer("ab");
StringBuilder buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
char[] content = buf.toString().toCharArray();
@ -85,7 +85,7 @@ public class TestToken extends LuceneTestCase {
// now as a string, first variant
t = new Token();
buf = new StringBuffer("ab");
buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
String content = buf.toString();
@ -99,7 +99,7 @@ public class TestToken extends LuceneTestCase {
// now as a string, second variant
t = new Token();
buf = new StringBuffer("ab");
buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
String content = buf.toString();
@ -113,7 +113,7 @@ public class TestToken extends LuceneTestCase {
// Test for slow growth to a long term
t = new Token();
buf = new StringBuffer("a");
buf = new StringBuilder("a");
for (int i = 0; i < 20000; i++)
{
String content = buf.toString();
@ -127,7 +127,7 @@ public class TestToken extends LuceneTestCase {
// Test for slow growth to a long term
t = new Token();
buf = new StringBuffer("a");
buf = new StringBuilder("a");
for (int i = 0; i < 20000; i++)
{
String content = buf.toString();


@ -39,7 +39,7 @@ public class TestTermAttributeImpl extends LuceneTestCase {
public void testGrow() {
TermAttributeImpl t = new TermAttributeImpl();
StringBuffer buf = new StringBuffer("ab");
StringBuilder buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
char[] content = buf.toString().toCharArray();
@ -53,7 +53,7 @@ public class TestTermAttributeImpl extends LuceneTestCase {
// now as a string, first variant
t = new TermAttributeImpl();
buf = new StringBuffer("ab");
buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
String content = buf.toString();
@ -67,7 +67,7 @@ public class TestTermAttributeImpl extends LuceneTestCase {
// now as a string, second variant
t = new TermAttributeImpl();
buf = new StringBuffer("ab");
buf = new StringBuilder("ab");
for (int i = 0; i < 20; i++)
{
String content = buf.toString();
@ -81,7 +81,7 @@ public class TestTermAttributeImpl extends LuceneTestCase {
// Test for slow growth to a long term
t = new TermAttributeImpl();
buf = new StringBuffer("a");
buf = new StringBuilder("a");
for (int i = 0; i < 20000; i++)
{
String content = buf.toString();
@ -95,7 +95,7 @@ public class TestTermAttributeImpl extends LuceneTestCase {
// Test for slow growth to a long term
t = new TermAttributeImpl();
buf = new StringBuffer("a");
buf = new StringBuilder("a");
for (int i = 0; i < 20000; i++)
{
String content = buf.toString();


@ -152,7 +152,7 @@ class DocHelper {
static {
//Initialize the large Lazy Field
StringBuffer buffer = new StringBuffer();
StringBuilder buffer = new StringBuilder();
for (int i = 0; i < 10000; i++)
{
buffer.append("Lazily loading lengths of language in lieu of laughing ");


@ -955,7 +955,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
public static Document createDocument(int n, int numFields) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
Document doc = new Document();
sb.append("a");
sb.append(n);


@ -1440,7 +1440,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
// occurs but the terms are very long (heavy on
// char[] arrays)
for(int j=0;j<100;j++) {
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
String x = Integer.toString(j) + ".";
for(int k=0;k<1000;k++)
b.append(x);
@ -1513,7 +1513,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, new IndexWriter.MaxFieldLength(100000000));
writer.setRAMBufferSizeMB(0.01);
// Massive doc that has 128 K a's
StringBuffer b = new StringBuffer(1024*1024);
StringBuilder b = new StringBuilder(1024*1024);
for(int i=0;i<4096;i++) {
b.append(" a a a a a a a a");
b.append(" a a a a a a a a");
@ -2677,7 +2677,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
@ -2957,7 +2957,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), new IndexWriter.MaxFieldLength(100000));
Document doc = new Document();
StringBuffer b = new StringBuffer();
StringBuilder b = new StringBuilder();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");


@ -509,7 +509,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
public static Document createDocument(int n, String indexName, int numFields) {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
Document doc = new Document();
doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));


@ -242,7 +242,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.setSimilarity(new SimpleSimilarity());
StringBuffer sb = new StringBuffer(265);
StringBuilder sb = new StringBuilder(265);
String term = "term";
for(int i = 0; i<30; i++){
Document d = new Document();


@ -179,7 +179,7 @@ public class TestPayloads extends LuceneTestCase {
int numDocs = skipInterval + 1;
// create content for the test documents with just a few terms
Term[] terms = generateTerms(fieldName, numTerms);
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < terms.length; i++) {
sb.append(terms[i].text);
sb.append(" ");
@ -350,7 +350,7 @@ public class TestPayloads extends LuceneTestCase {
private Term[] generateTerms(String fieldName, int n) {
int maxDigits = (int) (Math.log(n) / Math.log(10));
Term[] terms = new Term[n];
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < n; i++) {
sb.setLength(0);
sb.append("t");

Some files were not shown because too many files have changed in this diff.