LUCENE-10003: No C style array declaration (#206)

Most C-style array declarations have been switched to the standard Java placement, as illustrated below. The Google Java Format, which we adhere to, disallows C-style array declarations: https://google.github.io/styleguide/javaguide.html#s4.8.3-arrays
Some cases (especially the Snowball stemmers, which are generated code) can't be updated.
David Smiley 2021-08-25 17:06:41 -04:00 committed by GitHub
parent 88588e3dea
commit 8ac2673791
336 changed files with 1064 additions and 1064 deletions
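
The change itself is mechanical: Java accepts brackets on either the variable name or the type, and the two forms compile identically; the style guide simply forbids the C-style placement. A minimal sketch of the before/after shape (a hypothetical example class, not a file from this commit):

// Hypothetical illustration only; both declarations are equivalent to the
// compiler, and only the bracket placement changes.
class ArrayDeclarationStyle {
  // Before (C-style, disallowed by Google Java Format 4.8.3):
  //   private char buffer[] = new char[8];
  //   public int stem(char s[], int len) { ... }

  // After (brackets attached to the type):
  private char[] buffer = new char[8];

  public int stem(char[] s, int len) {
    return len; // behavior is unchanged; only the declaration syntax differs
  }
}

For the generated Snowball sources, the same change is applied through the Snowball generation patch (the "index 1b27b96..94f2d4b" hunks below) rather than by hand-editing each generated stemmer.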


@ -14,7 +14,7 @@
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
private static final String ZZ_ERROR_MSG[] = {
private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@ -32,7 +32,7 @@
/** this buffer contains the current text to be matched and is
the source of the yytext() string */
private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;
@ -110,7 +110,7 @@
/* is the buffer big enough? */
if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) {
/* if not: blow it up */
char newBuffer[] = new char[zzBuffer.length*2];
char[] newBuffer = new char[zzBuffer.length*2];
System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
zzBuffer = newBuffer;
zzEndRead += zzFinalHighSurrogate;


@ -14,7 +14,7 @@
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
private static final String ZZ_ERROR_MSG[] = {
private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@ -32,7 +32,7 @@
/** this buffer contains the current text to be matched and is
the source of the yytext() string */
private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;


@ -740,7 +740,7 @@ index 1b27b96..94f2d4b 100644
- public String getCurrent()
- {
- return current.toString();
+ public void setCurrent(char text[], int length) {
+ public void setCurrent(char[] text, int length) {
+ current = text;
+ cursor = 0;
+ limit = length;
@ -778,7 +778,7 @@ index 1b27b96..94f2d4b 100644
// current string
- protected StringBuilder current;
+ private char current[];
+ private char[] current;
protected int cursor;
protected int limit;
@ -926,7 +926,7 @@ index 1b27b96..94f2d4b 100644
+ final int newLength = limit + adjustment;
+ //resize if necessary
+ if (newLength > current.length) {
+ char newBuffer[] = new char[oversize(newLength)];
+ char[] newBuffer = new char[oversize(newLength)];
+ System.arraycopy(current, 0, newBuffer, 0, limit);
+ current = newBuffer;
+ }


@ -1,5 +1,5 @@
{
"gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java": "21c2cf7ba0a0cdeb43ebe624101e259c9348f6b0",
"gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java": "50f43f43859e63a5470f3c8249cad3ea9c131dc0",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex": "958b028ef3f0aec36488fb2bb033cdec5858035f"
}


@ -1,6 +1,6 @@
{
"gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1",
"gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex": "d1aa75b9b37646efe31731394f84a063eb7eed9d",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java": "78f5208455706d60a9ce4b63624ed04b0fd32573",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex": "71760e2f7abe078109545a0c68aeac9125508d7c"
"lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java": "8470ed427633f58905a8269c78927d7794451e55",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex": "44a271b04ad1564284982be166553584d38b5ea0"
}


@ -1,7 +1,7 @@
{
"gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "68263ff0a014904c6e89b040d868d8f399408908",
"gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "1424f4df33c977bb150d7377c3bd61f819113091",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/email/ASCIITLD.jflex": "bb3878ea10f85f124a0a9e4ea614d3400d664dae",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java": "2bf3efe1a1bc473eb3fe2456f50521ecd7d9b03b",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java": "b88c349d24028f557f2c014437f3f60c968ad9de",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex": "56a751d27e481fb55388f91ebf34f5a0cb8cb1b2",
"lucene/core/src/data/jflex/UnicodeEmojiProperties.jflex": "7491dd535debc6e9e9ce367c4d3a7217e466dcae"
}


@ -1,5 +1,5 @@
{
"gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java": "10b391af6953d2f7bcca86da835a1037705509ec",
"gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java": "743fb4cc4b88d36242b3d227320c85e89a6868a8",
"lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex": "a23a4b7cbcdba1fc864c0b85bc2784c8893a0f9f"
}


@ -1,6 +1,6 @@
{
"lucene/analysis/common/src/java/org/tartarus/snowball/Among.java": "5371973cc30637273366f042e1cff920e0dd14f6",
"lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java": "4e1caa344c7ac864c467ff0e615c1343e911b06b",
"lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java": "93d77707ddc746aad94c1308d2f6f4321a1aa003",
"lucene/analysis/common/src/java/org/tartarus/snowball/SnowballStemmer.java": "85bfc728393d7804f86f0def0467a12fd4b82fd3",
"lucene/analysis/common/src/java/org/tartarus/snowball/ext/ArabicStemmer.java": "2d43c4606bbaf96d9ac5f8be8ccf28e32164b9f0",
"lucene/analysis/common/src/java/org/tartarus/snowball/ext/ArmenianStemmer.java": "0be0949fe1f021ef41e3f9a27280b295ab1e998c",


@ -63,7 +63,7 @@ public class ArabicNormalizer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int normalize(char s[], int len) {
public int normalize(char[] s, int len) {
for (int i = 0; i < len; i++) {
switch (s[i]) {


@ -43,7 +43,7 @@ public class ArabicStemmer {
public static final char WAW = '\u0648';
public static final char YEH = '\u064A';
public static final char prefixes[][] = {
public static final char[][] prefixes = {
("" + ALEF + LAM).toCharArray(),
("" + WAW + ALEF + LAM).toCharArray(),
("" + BEH + ALEF + LAM).toCharArray(),
@ -53,7 +53,7 @@ public class ArabicStemmer {
("" + WAW).toCharArray(),
};
public static final char suffixes[][] = {
public static final char[][] suffixes = {
("" + HEH + ALEF).toCharArray(),
("" + ALEF + NOON).toCharArray(),
("" + ALEF + TEH).toCharArray(),
@ -73,7 +73,7 @@ public class ArabicStemmer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
len = stemPrefix(s, len);
len = stemSuffix(s, len);
@ -87,7 +87,7 @@ public class ArabicStemmer {
* @param len length of input buffer
* @return new length of input buffer after stemming.
*/
public int stemPrefix(char s[], int len) {
public int stemPrefix(char[] s, int len) {
for (int i = 0; i < prefixes.length; i++)
if (startsWithCheckLength(s, len, prefixes[i])) return deleteN(s, 0, len, prefixes[i].length);
return len;
@ -100,7 +100,7 @@ public class ArabicStemmer {
* @param len length of input buffer
* @return new length of input buffer after stemming
*/
public int stemSuffix(char s[], int len) {
public int stemSuffix(char[] s, int len) {
for (int i = 0; i < suffixes.length; i++)
if (endsWithCheckLength(s, len, suffixes[i]))
len = deleteN(s, len - suffixes[i].length, len, suffixes[i].length);
@ -115,7 +115,7 @@ public class ArabicStemmer {
* @param prefix prefix to check
* @return true if the prefix matches and can be stemmed
*/
boolean startsWithCheckLength(char s[], int len, char prefix[]) {
boolean startsWithCheckLength(char[] s, int len, char[] prefix) {
if (prefix.length == 1 && len < 4) { // wa- prefix requires at least 3 characters
return false;
} else if (len < prefix.length + 2) { // other prefixes require only 2.
@ -139,7 +139,7 @@ public class ArabicStemmer {
* @param suffix suffix to check
* @return true if the suffix matches and can be stemmed
*/
boolean endsWithCheckLength(char s[], int len, char suffix[]) {
boolean endsWithCheckLength(char[] s, int len, char[] suffix) {
if (len < suffix.length + 2) { // all suffixes require at least 2 characters after stemming
return false;
} else {


@ -33,7 +33,7 @@ public class BulgarianStemmer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int stem(final char s[], int len) {
public int stem(final char[] s, int len) {
if (len < 4) // do not stem
return len;
@ -76,7 +76,7 @@ public class BulgarianStemmer {
* @param len length of input buffer
* @return new stemmed length
*/
private int removeArticle(final char s[], final int len) {
private int removeArticle(final char[] s, final int len) {
if (len > 6 && endsWith(s, len, "ият")) return len - 3;
if (len > 5) {
@ -96,7 +96,7 @@ public class BulgarianStemmer {
return len;
}
private int removePlural(final char s[], final int len) {
private int removePlural(final char[] s, final int len) {
if (len > 6) {
if (endsWith(s, len, "овци")) return len - 3; // replace with о
if (endsWith(s, len, "ове")) return len - 3;


@ -33,7 +33,7 @@ public class BengaliNormalizer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int normalize(char s[], int len) {
public int normalize(char[] s, int len) {
for (int i = 0; i < len; i++) {
switch (s[i]) {


@ -28,7 +28,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.endsWith;
* <i>http://members.unine.ch/jacques.savoy/clef/BengaliStemmerLight.java.txt</i>
*/
public class BengaliStemmer {
public int stem(char buffer[], int len) {
public int stem(char[] buffer, int len) {
// 8
if (len > 9


@ -28,8 +28,8 @@ import org.apache.lucene.util.ArrayUtil;
*/
public abstract class BaseCharFilter extends CharFilter {
private int offsets[];
private int diffs[];
private int[] offsets;
private int[] diffs;
private int size = 0;
public BaseCharFilter(Reader in) {


@ -29678,7 +29678,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
private static final String ZZ_ERROR_MSG[] = {
private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@ -29806,7 +29806,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
/** this buffer contains the current text to be matched and is
the source of the yytext() string */
private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;
@ -30017,7 +30017,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
}
@Override
public int read(char cbuf[], int off, int len) throws IOException {
public int read(char[] cbuf, int off, int len) throws IOException {
int i = 0;
for ( ; i < len ; ++i) {
int ch = read();
@ -30131,7 +30131,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
/* is the buffer big enough? */
if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) {
/* if not: blow it up */
char newBuffer[] = new char[zzBuffer.length*2];
char[] newBuffer = new char[zzBuffer.length*2];
System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
zzBuffer = newBuffer;
zzEndRead += zzFinalHighSurrogate;


@ -212,7 +212,7 @@ InlineElment = ( [aAbBiIqQsSuU] |
}
@Override
public int read(char cbuf[], int off, int len) throws IOException {
public int read(char[] cbuf, int off, int len) throws IOException {
int i = 0;
for ( ; i < len ; ++i) {
int ch = read();


@ -93,9 +93,9 @@ public final class CJKBigramFilter extends TokenFilter {
private final PositionLengthAttribute posLengthAtt = addAttribute(PositionLengthAttribute.class);
// buffers containing codepoint and offsets in parallel
int buffer[] = new int[8];
int startOffset[] = new int[8];
int endOffset[] = new int[8];
int[] buffer = new int[8];
int[] startOffset = new int[8];
int[] endOffset = new int[8];
// length of valid buffer
int bufferLen;
// current buffer index
@ -264,7 +264,7 @@ public final class CJKBigramFilter extends TokenFilter {
index -= last;
}
char termBuffer[] = termAtt.buffer();
char[] termBuffer = termAtt.buffer();
int len = termAtt.length();
int start = offsetAtt.startOffset();
int end = offsetAtt.endOffset();
@ -300,7 +300,7 @@ public final class CJKBigramFilter extends TokenFilter {
*/
private void flushBigram() {
clearAttributes();
char termBuffer[] =
char[] termBuffer =
termAtt.resizeBuffer(4); // maximum bigram length in code units (2 supplementaries)
int len1 = Character.toChars(buffer[index], termBuffer, 0);
int len2 = len1 + Character.toChars(buffer[index + 1], termBuffer, len1);
@ -322,7 +322,7 @@ public final class CJKBigramFilter extends TokenFilter {
*/
private void flushUnigram() {
clearAttributes();
char termBuffer[] = termAtt.resizeBuffer(2); // maximum unigram length (2 surrogates)
char[] termBuffer = termAtt.resizeBuffer(2); // maximum unigram length (2 surrogates)
int len = Character.toChars(buffer[index], termBuffer, 0);
termAtt.setLength(len);
offsetAtt.setOffset(startOffset[index], endOffset[index]);


@ -39,7 +39,7 @@ public class CJKWidthCharFilter extends BaseCharFilter {
* as a fallback when they cannot properly combine with a preceding
* character into a composed form.
*/
private static final char KANA_NORM[] =
private static final char[] KANA_NORM =
new char[] {
0x30fb, 0x30f2, 0x30a1, 0x30a3, 0x30a5, 0x30a7, 0x30a9, 0x30e3, 0x30e5,
0x30e7, 0x30c3, 0x30fc, 0x30a2, 0x30a4, 0x30a6, 0x30a8, 0x30aa, 0x30ab,
@ -51,7 +51,7 @@ public class CJKWidthCharFilter extends BaseCharFilter {
};
/* kana combining diffs: 0x30A6-0x30FD */
private static final byte KANA_COMBINE_VOICED[] =
private static final byte[] KANA_COMBINE_VOICED =
new byte[] {
78, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1,
@ -59,7 +59,7 @@ public class CJKWidthCharFilter extends BaseCharFilter {
0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
};
private static final byte KANA_COMBINE_SEMI_VOICED[] =
private static final byte[] KANA_COMBINE_SEMI_VOICED =
new byte[] {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2,


@ -42,7 +42,7 @@ public final class CJKWidthFilter extends TokenFilter {
* as a fallback when they cannot properly combine with a preceding
* character into a composed form.
*/
private static final char KANA_NORM[] =
private static final char[] KANA_NORM =
new char[] {
0x30fb, 0x30f2, 0x30a1, 0x30a3, 0x30a5, 0x30a7, 0x30a9, 0x30e3, 0x30e5,
0x30e7, 0x30c3, 0x30fc, 0x30a2, 0x30a4, 0x30a6, 0x30a8, 0x30aa, 0x30ab,
@ -60,7 +60,7 @@ public final class CJKWidthFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
char text[] = termAtt.buffer();
char[] text = termAtt.buffer();
int length = termAtt.length();
for (int i = 0; i < length; i++) {
final char ch = text[i];
@ -84,14 +84,14 @@ public final class CJKWidthFilter extends TokenFilter {
}
/* kana combining diffs: 0x30A6-0x30FD */
private static final byte KANA_COMBINE_VOICED[] =
private static final byte[] KANA_COMBINE_VOICED =
new byte[] {
78, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,
1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
};
private static final byte KANA_COMBINE_HALF_VOICED[] =
private static final byte[] KANA_COMBINE_HALF_VOICED =
new byte[] {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2,
@ -100,7 +100,7 @@ public final class CJKWidthFilter extends TokenFilter {
};
/** returns true if we successfully combined the voice mark */
private static boolean combine(char text[], int pos, char ch) {
private static boolean combine(char[] text, int pos, char ch) {
final char prev = text[pos - 1];
if (prev >= 0x30A6 && prev <= 0x30FD) {
text[pos - 1] +=


@ -69,7 +69,7 @@ public class SoraniNormalizer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int normalize(char s[], int len) {
public int normalize(char[] s, int len) {
for (int i = 0; i < len; i++) {
switch (s[i]) {
case YEH:


@ -28,7 +28,7 @@ public class SoraniStemmer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
// postposition
if (len > 5 && endsWith(s, len, "دا")) {
len -= 2;


@ -234,7 +234,7 @@ class ClassicTokenizerImpl {
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
private static final String ZZ_ERROR_MSG[] = {
private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@ -278,7 +278,7 @@ class ClassicTokenizerImpl {
/**
* this buffer contains the current text to be matched and is the source of the yytext() string
*/
private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;
@ -396,7 +396,7 @@ class ClassicTokenizerImpl {
/* is the buffer big enough? */
if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) {
/* if not: blow it up */
char newBuffer[] = new char[zzBuffer.length * 2];
char[] newBuffer = new char[zzBuffer.length * 2];
System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
zzBuffer = newBuffer;
zzEndRead += zzFinalHighSurrogate;


@ -155,7 +155,7 @@ public final class CommonGramsFilter extends TokenFilter {
clearAttributes();
int length = buffer.length();
char termText[] = termAttribute.buffer();
char[] termText = termAttribute.buffer();
if (length > termText.length) {
termText = termAttribute.resizeBuffer(length);
}


@ -317,7 +317,7 @@ public class PatternParser extends DefaultHandler {
/** @see org.xml.sax.ContentHandler#characters(char[], int, int) */
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public void characters(char ch[], int start, int length) {
public void characters(char[] ch, int start, int length) {
StringBuilder chars = new StringBuilder(length);
chars.append(ch, start, length);
String word = readToken(chars);


@ -122,7 +122,7 @@ public class TernaryTree implements Cloneable {
if (freenode + len > eq.length) {
redimNodeArrays(eq.length + BLOCK_SIZE);
}
char strkey[] = new char[len--];
char[] strkey = new char[len--];
key.getChars(0, len, strkey, 0);
strkey[len] = 0;
root = insert(root, strkey, 0, val);
@ -255,7 +255,7 @@ public class TernaryTree implements Cloneable {
public int find(String key) {
int len = key.length();
char strkey[] = new char[len + 1];
char[] strkey = new char[len + 1];
key.getChars(0, len, strkey, 0);
strkey[len] = 0;


@ -37,7 +37,7 @@ public final class DecimalDigitFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
char buffer[] = termAtt.buffer();
char[] buffer = termAtt.buffer();
int length = termAtt.length();
for (int i = 0; i < length; i++) {


@ -34,7 +34,7 @@ public class CzechStemmer {
* @return length of input buffer after normalization
* <p><b>NOTE</b>: Input is expected to be in lowercase, but with diacritical marks
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
len = removeCase(s, len);
len = removePossessives(s, len);
if (len > 0) {
@ -43,7 +43,7 @@ public class CzechStemmer {
return len;
}
private int removeCase(char s[], int len) {
private int removeCase(char[] s, int len) {
if (len > 7 && endsWith(s, len, "atech")) return len - 5;
if (len > 6
@ -112,14 +112,14 @@ public class CzechStemmer {
return len;
}
private int removePossessives(char s[], int len) {
private int removePossessives(char[] s, int len) {
if (len > 5 && (endsWith(s, len, "ov") || endsWith(s, len, "in") || endsWith(s, len, "ův")))
return len - 2;
return len;
}
private int normalize(char s[], int len) {
private int normalize(char[] s, int len) {
if (endsWith(s, len, "čt")) { // čt -> ck
s[len - 2] = 'c';
s[len - 1] = 'k';


@ -59,7 +59,7 @@ package org.apache.lucene.analysis.de;
*/
public class GermanLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
for (int i = 0; i < len; i++)
switch (s[i]) {
case 'ä':
@ -110,7 +110,7 @@ public class GermanLightStemmer {
}
}
private int step1(char s[], int len) {
private int step1(char[] s, int len) {
if (len > 5 && s[len - 3] == 'e' && s[len - 2] == 'r' && s[len - 1] == 'n') return len - 3;
if (len > 4 && s[len - 2] == 'e')
@ -129,7 +129,7 @@ public class GermanLightStemmer {
return len;
}
private int step2(char s[], int len) {
private int step2(char[] s, int len) {
if (len > 5 && s[len - 3] == 'e' && s[len - 2] == 's' && s[len - 1] == 't') return len - 3;
if (len > 4 && s[len - 2] == 'e' && (s[len - 1] == 'r' || s[len - 1] == 'n')) return len - 2;


@ -59,7 +59,7 @@ package org.apache.lucene.analysis.de;
*/
public class GermanMinimalStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 5) return len;
for (int i = 0; i < len; i++)


@ -53,7 +53,7 @@ public final class GermanNormalizationFilter extends TokenFilter {
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
int state = N;
char buffer[] = termAtt.buffer();
char[] buffer = termAtt.buffer();
int length = termAtt.length();
for (int i = 0; i < length; i++) {
final char c = buffer[i];


@ -39,7 +39,7 @@ public class GreekStemmer {
* @param len The length of the char[] array.
* @return The new length of the stemmed word.
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 4) // too short
return len;
@ -72,7 +72,7 @@ public class GreekStemmer {
return rule22(s, len);
}
private int rule0(char s[], int len) {
private int rule0(char[] s, int len) {
if (len > 9 && (endsWith(s, len, "καθεστωτοσ") || endsWith(s, len, "καθεστωτων")))
return len - 4;
@ -131,7 +131,7 @@ public class GreekStemmer {
return len;
}
private int rule1(char s[], int len) {
private int rule1(char[] s, int len) {
if (len > 4 && (endsWith(s, len, "αδεσ") || endsWith(s, len, "αδων"))) {
len -= 4;
if (!(endsWith(s, len, "οκ")
@ -148,7 +148,7 @@ public class GreekStemmer {
return len;
}
private int rule2(char s[], int len) {
private int rule2(char[] s, int len) {
if (len > 4 && (endsWith(s, len, "εδεσ") || endsWith(s, len, "εδων"))) {
len -= 4;
if (endsWith(s, len, "οπ")
@ -163,7 +163,7 @@ public class GreekStemmer {
return len;
}
private int rule3(char s[], int len) {
private int rule3(char[] s, int len) {
if (len > 5 && (endsWith(s, len, "ουδεσ") || endsWith(s, len, "ουδων"))) {
len -= 5;
if (endsWith(s, len, "αρκ")
@ -188,7 +188,7 @@ public class GreekStemmer {
private static final CharArraySet exc4 =
new CharArraySet(Arrays.asList("θ", "δ", "ελ", "γαλ", "ν", "π", "ιδ", "παρ"), false);
private int rule4(char s[], int len) {
private int rule4(char[] s, int len) {
if (len > 3 && (endsWith(s, len, "εωσ") || endsWith(s, len, "εων"))) {
len -= 3;
if (exc4.contains(s, 0, len)) len++; // add back -ε
@ -196,7 +196,7 @@ public class GreekStemmer {
return len;
}
private int rule5(char s[], int len) {
private int rule5(char[] s, int len) {
if (len > 2 && endsWith(s, len, "ια")) {
len -= 2;
if (endsWithVowel(s, len)) len++; // add back -ι
@ -216,7 +216,7 @@ public class GreekStemmer {
"συναδ", "τσαμ", "υποδ", "φιλον", "φυλοδ", "χασ"),
false);
private int rule6(char s[], int len) {
private int rule6(char[] s, int len) {
boolean removed = false;
if (len > 3 && (endsWith(s, len, "ικα") || endsWith(s, len, "ικο"))) {
len -= 3;
@ -239,7 +239,7 @@ public class GreekStemmer {
"χ"),
false);
private int rule7(char s[], int len) {
private int rule7(char[] s, int len) {
if (len == 5 && endsWith(s, len, "αγαμε")) return len - 1;
if (len > 7 && endsWith(s, len, "ηθηκαμε")) len -= 7;
@ -359,7 +359,7 @@ public class GreekStemmer {
"ψηλοταβ"),
false);
private int rule8(char s[], int len) {
private int rule8(char[] s, int len) {
boolean removed = false;
if (len > 8 && endsWith(s, len, "ιουντανε")) {
@ -410,7 +410,7 @@ public class GreekStemmer {
"θαρρ", "θ"),
false);
private int rule9(char s[], int len) {
private int rule9(char[] s, int len) {
if (len > 5 && endsWith(s, len, "ησετε")) len -= 5;
if (len > 3 && endsWith(s, len, "ετε")) {
@ -455,7 +455,7 @@ public class GreekStemmer {
return len;
}
private int rule10(char s[], int len) {
private int rule10(char[] s, int len) {
if (len > 5 && (endsWith(s, len, "οντασ") || endsWith(s, len, "ωντασ"))) {
len -= 5;
if (len == 3 && endsWith(s, len, "αρχ")) {
@ -471,7 +471,7 @@ public class GreekStemmer {
return len;
}
private int rule11(char s[], int len) {
private int rule11(char[] s, int len) {
if (len > 6 && endsWith(s, len, "ομαστε")) {
len -= 6;
if (len == 2 && endsWith(s, len, "ον")) {
@ -498,7 +498,7 @@ public class GreekStemmer {
new CharArraySet(
Arrays.asList("αλ", "αρ", "εκτελ", "ζ", "μ", "ξ", "παρακαλ", "αρ", "προ", "νισ"), false);
private int rule12(char s[], int len) {
private int rule12(char[] s, int len) {
if (len > 5 && endsWith(s, len, "ιεστε")) {
len -= 5;
if (exc12a.contains(s, 0, len)) len += 4; // add back -ιεστ
@ -515,7 +515,7 @@ public class GreekStemmer {
private static final CharArraySet exc13 =
new CharArraySet(Arrays.asList("διαθ", "θ", "παρακαταθ", "προσθ", "συνθ"), false);
private int rule13(char s[], int len) {
private int rule13(char[] s, int len) {
if (len > 6 && endsWith(s, len, "ηθηκεσ")) {
len -= 6;
} else if (len > 5 && (endsWith(s, len, "ηθηκα") || endsWith(s, len, "ηθηκε"))) {
@ -576,7 +576,7 @@ public class GreekStemmer {
"τσα"),
false);
private int rule14(char s[], int len) {
private int rule14(char[] s, int len) {
boolean removed = false;
if (len > 5 && endsWith(s, len, "ουσεσ")) {
@ -660,7 +660,7 @@ public class GreekStemmer {
private static final CharArraySet exc15b =
new CharArraySet(Arrays.asList("ψοφ", "ναυλοχ"), false);
private int rule15(char s[], int len) {
private int rule15(char[] s, int len) {
boolean removed = false;
if (len > 4 && endsWith(s, len, "αγεσ")) {
len -= 4;
@ -696,7 +696,7 @@ public class GreekStemmer {
new CharArraySet(
Arrays.asList("ν", "χερσον", "δωδεκαν", "ερημον", "μεγαλον", "επταν"), false);
private int rule16(char s[], int len) {
private int rule16(char[] s, int len) {
boolean removed = false;
if (len > 4 && endsWith(s, len, "ησου")) {
len -= 4;
@ -717,7 +717,7 @@ public class GreekStemmer {
"ασβ", "σβ", "αχρ", "χρ", "απλ", "αειμν", "δυσχρ", "ευχρ", "κοινοχρ", "παλιμψ"),
false);
private int rule17(char s[], int len) {
private int rule17(char[] s, int len) {
if (len > 4 && endsWith(s, len, "ηστε")) {
len -= 4;
if (exc17.contains(s, 0, len)) len += 3; // add back the -ηστ
@ -729,7 +729,7 @@ public class GreekStemmer {
private static final CharArraySet exc18 =
new CharArraySet(Arrays.asList("ν", "ρ", "σπι", "στραβομουτσ", "κακομουτσ", "εξων"), false);
private int rule18(char s[], int len) {
private int rule18(char[] s, int len) {
boolean removed = false;
if (len > 6 && (endsWith(s, len, "ησουνε") || endsWith(s, len, "ηθουνε"))) {
@ -753,7 +753,7 @@ public class GreekStemmer {
new CharArraySet(
Arrays.asList("παρασουσ", "φ", "χ", "ωριοπλ", "αζ", "αλλοσουσ", "ασουσ"), false);
private int rule19(char s[], int len) {
private int rule19(char[] s, int len) {
boolean removed = false;
if (len > 6 && (endsWith(s, len, "ησουμε") || endsWith(s, len, "ηθουμε"))) {
@ -773,13 +773,13 @@ public class GreekStemmer {
return len;
}
private int rule20(char s[], int len) {
private int rule20(char[] s, int len) {
if (len > 5 && (endsWith(s, len, "ματων") || endsWith(s, len, "ματοσ"))) len -= 3;
else if (len > 4 && endsWith(s, len, "ματα")) len -= 2;
return len;
}
private int rule21(char s[], int len) {
private int rule21(char[] s, int len) {
if (len > 9 && endsWith(s, len, "ιοντουσαν")) return len - 9;
if (len > 8
@ -877,7 +877,7 @@ public class GreekStemmer {
return len;
}
private int rule22(char s[], int len) {
private int rule22(char[] s, int len) {
if (endsWith(s, len, "εστερ") || endsWith(s, len, "εστατ")) return len - 5;
if (endsWith(s, len, "οτερ")
@ -899,7 +899,7 @@ public class GreekStemmer {
* @param suffix A {@link String} object to check if the word given ends with these characters.
* @return True if the word ends with the suffix given , false otherwise.
*/
private boolean endsWith(char s[], int len, String suffix) {
private boolean endsWith(char[] s, int len, String suffix) {
final int suffixLen = suffix.length();
if (suffixLen > len) return false;
for (int i = suffixLen - 1; i >= 0; i--)
@ -916,7 +916,7 @@ public class GreekStemmer {
* @return True if the word contained in the leading portion of char[] array , ends with a vowel ,
* false otherwise.
*/
private boolean endsWithVowel(char s[], int len) {
private boolean endsWithVowel(char[] s, int len) {
if (len == 0) return false;
switch (s[len - 1]) {
case 'α':
@ -940,7 +940,7 @@ public class GreekStemmer {
* @return True if the word contained in the leading portion of char[] array , ends with a vowel ,
* false otherwise.
*/
private boolean endsWithVowelNoY(char s[], int len) {
private boolean endsWithVowelNoY(char[] s, int len) {
if (len == 0) return false;
switch (s[len - 1]) {
case 'α':


@ -38993,7 +38993,7 @@ public final class UAX29URLEmailTokenizerImpl {
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
private static final String ZZ_ERROR_MSG[] = {
private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@ -39168,7 +39168,7 @@ public final class UAX29URLEmailTokenizerImpl {
/** this buffer contains the current text to be matched and is
the source of the yytext() string */
private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;


@ -23,7 +23,7 @@ package org.apache.lucene.analysis.en;
*/
public class EnglishMinimalStemmer {
@SuppressWarnings("fallthrough")
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 3 || s[len - 1] != 's') return len;
switch (s[len - 2]) {


@ -59,7 +59,7 @@ package org.apache.lucene.analysis.es;
*/
public class SpanishLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 5) return len;
for (int i = 0; i < len; i++)


@ -24,7 +24,7 @@ package org.apache.lucene.analysis.es;
*/
public class SpanishMinimalStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 4 || s[len - 1] != 's') return len;
for (int i = 0; i < len; i++)


@ -57,7 +57,7 @@ public class PersianNormalizer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int normalize(char s[], int len) {
public int normalize(char[] s, int len) {
for (int i = 0; i < len; i++) {
switch (s[i]) {


@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class FinnishLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 4) return len;
for (int i = 0; i < len; i++)
@ -83,7 +83,7 @@ public class FinnishLightStemmer {
return len;
}
private int step1(char s[], int len) {
private int step1(char[] s, int len) {
if (len > 8) {
if (endsWith(s, len, "kin")) return step1(s, len - 3);
if (endsWith(s, len, "ko")) return step1(s, len - 2);
@ -96,7 +96,7 @@ public class FinnishLightStemmer {
return len;
}
private int step2(char s[], int len) {
private int step2(char[] s, int len) {
if (len > 5) {
if (endsWith(s, len, "lla") || endsWith(s, len, "tse") || endsWith(s, len, "sti"))
return len - 3;
@ -109,7 +109,7 @@ public class FinnishLightStemmer {
return len;
}
private int step3(char s[], int len) {
private int step3(char[] s, int len) {
if (len > 8) {
if (endsWith(s, len, "nnen")) {
s[len - 4] = 's';
@ -173,7 +173,7 @@ public class FinnishLightStemmer {
return len;
}
private int norm1(char s[], int len) {
private int norm1(char[] s, int len) {
if (len > 5 && endsWith(s, len, "hde")) {
s[len - 3] = 'k';
s[len - 2] = 's';
@ -198,7 +198,7 @@ public class FinnishLightStemmer {
return len;
}
private int norm2(char s[], int len) {
private int norm2(char[] s, int len) {
if (len > 8) {
if (s[len - 1] == 'e' || s[len - 1] == 'o' || s[len - 1] == 'u') len--;
}


@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class FrenchLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len > 5 && s[len - 1] == 'x') {
if (s[len - 3] == 'a' && s[len - 2] == 'u' && s[len - 4] != 'e') s[len - 2] = 'l';
len--;
@ -209,7 +209,7 @@ public class FrenchLightStemmer {
return norm(s, len);
}
private int norm(char s[], int len) {
private int norm(char[] s, int len) {
if (len > 4) {
for (int i = 0; i < len; i++)
switch (s[i]) {


@ -58,7 +58,7 @@ package org.apache.lucene.analysis.fr;
* general French corpora.</i> Jacques Savoy.
*/
public class FrenchMinimalStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 6) return len;
if (s[len - 1] == 'x') {


@ -31,7 +31,7 @@ public class GalicianMinimalStemmer extends RSLPStemmerBase {
private static final Step pluralStep =
parse(GalicianMinimalStemmer.class, "galician.rslp").get("Plural");
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
return pluralStep.apply(s, len);
}
}


@ -44,7 +44,7 @@ public class GalicianStemmer extends RSLPStemmerBase {
* @param len initial valid length of buffer
* @return new valid length, stemmed
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
assert s.length >= len + 1 : "this stemmer requires an oversized array of at least 1";
len = plural.apply(s, len);


@ -43,7 +43,7 @@ public class HindiNormalizer {
* @param len length of input buffer
* @return length of input buffer after normalization
*/
public int normalize(char s[], int len) {
public int normalize(char[] s, int len) {
for (int i = 0; i < len; i++) {
switch (s[i]) {


@ -26,7 +26,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
* http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf
*/
public class HindiStemmer {
public int stem(char buffer[], int len) {
public int stem(char[] buffer, int len) {
// 5
if ((len > 6)
&& (endsWith(buffer, len, "ाएंगी")


@ -60,7 +60,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
* Portuguese, German and Hungarian Languages</i> Jacques Savoy
*/
public class HungarianLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
for (int i = 0; i < len; i++)
switch (s[i]) {
case 'á':
@ -94,7 +94,7 @@ public class HungarianLightStemmer {
return normalize(s, len);
}
private int removeCase(char s[], int len) {
private int removeCase(char[] s, int len) {
if (len > 6 && endsWith(s, len, "kent")) return len - 4;
if (len > 5) {
@ -147,7 +147,7 @@ public class HungarianLightStemmer {
return len;
}
private int removePossessive(char s[], int len) {
private int removePossessive(char[] s, int len) {
if (len > 6) {
if (!isVowel(s[len - 5])
&& (endsWith(s, len, "atok") || endsWith(s, len, "otok") || endsWith(s, len, "etek")))
@ -202,7 +202,7 @@ public class HungarianLightStemmer {
}
@SuppressWarnings("fallthrough")
private int removePlural(char s[], int len) {
private int removePlural(char[] s, int len) {
if (len > 3 && s[len - 1] == 'k')
switch (s[len - 2]) {
case 'a':
@ -215,7 +215,7 @@ public class HungarianLightStemmer {
return len;
}
private int normalize(char s[], int len) {
private int normalize(char[] s, int len) {
if (len > 3)
switch (s[len - 1]) {
case 'a':


@ -94,7 +94,7 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
@Override
public void inform(ResourceLoader loader) throws IOException {
String dicts[] = dictionaryFiles.split(",");
String[] dicts = dictionaryFiles.split(",");
InputStream affix = null;
List<InputStream> dictionaries = new ArrayList<>();


@ -25,7 +25,7 @@ import java.nio.charset.StandardCharsets;
// many hunspell dictionaries use this encoding, yet java does not have it?!?!
final class ISO8859_14Decoder extends CharsetDecoder {
static final char TABLE[] =
static final char[] TABLE =
new char[] {
0x00A0, 0x1E02, 0x1E03, 0x00A3, 0x010A, 0x010B, 0x1E0A, 0x00A7,
0x1E80, 0x00A9, 0x1E82, 0x1E0B, 0x1EF2, 0x00AD, 0x00AE, 0x0178,


@ -42,7 +42,7 @@ public class IndonesianStemmer {
* <p>Use <code>stemDerivational</code> to control whether full stemming or only light
* inflectional stemming is done.
*/
public int stem(char text[], int length, boolean stemDerivational) {
public int stem(char[] text, int length, boolean stemDerivational) {
flags = 0;
numSyllables = 0;
for (int i = 0; i < length; i++) if (isVowel(text[i])) numSyllables++;
@ -54,7 +54,7 @@ public class IndonesianStemmer {
return length;
}
private int stemDerivational(char text[], int length) {
private int stemDerivational(char[] text, int length) {
int oldLength = length;
if (numSyllables > 2) length = removeFirstOrderPrefix(text, length);
if (oldLength != length) { // a rule is fired
@ -82,7 +82,7 @@ public class IndonesianStemmer {
}
}
private int removeParticle(char text[], int length) {
private int removeParticle(char[] text, int length) {
if (endsWith(text, length, "kah")
|| endsWith(text, length, "lah")
|| endsWith(text, length, "pun")) {
@ -93,7 +93,7 @@ public class IndonesianStemmer {
return length;
}
private int removePossessivePronoun(char text[], int length) {
private int removePossessivePronoun(char[] text, int length) {
if (endsWith(text, length, "ku") || endsWith(text, length, "mu")) {
numSyllables--;
return length - 2;
@ -107,7 +107,7 @@ public class IndonesianStemmer {
return length;
}
private int removeFirstOrderPrefix(char text[], int length) {
private int removeFirstOrderPrefix(char[] text, int length) {
if (startsWith(text, length, "meng")) {
flags |= REMOVED_MENG;
numSyllables--;
@ -198,7 +198,7 @@ public class IndonesianStemmer {
return length;
}
private int removeSecondOrderPrefix(char text[], int length) {
private int removeSecondOrderPrefix(char[] text, int length) {
if (startsWith(text, length, "ber")) {
flags |= REMOVED_BER;
numSyllables--;
@ -240,7 +240,7 @@ public class IndonesianStemmer {
return length;
}
private int removeSuffix(char text[], int length) {
private int removeSuffix(char[] text, int length) {
if (endsWith(text, length, "kan")
&& (flags & REMOVED_KE) == 0
&& (flags & REMOVED_PENG) == 0


@ -71,7 +71,7 @@ public class IndicNormalizer {
* <p>the columns are: ch1, ch2, ch3, res, flags ch1, ch2, and ch3 are the decomposition res is
* the composition, and flags are the scripts to which it applies.
*/
private static final int decompositions[][] = {
private static final int[][] decompositions = {
/* devanagari, gujarati vowel candra O */
{0x05, 0x3E, 0x45, 0x11, flag(DEVANAGARI) | flag(GUJARATI)},
/* devanagari short O */
@ -243,7 +243,7 @@ public class IndicNormalizer {
* @param len valid length
* @return normalized length
*/
public int normalize(char text[], int len) {
public int normalize(char[] text, int len) {
for (int i = 0; i < len; i++) {
final Character.UnicodeBlock block = Character.UnicodeBlock.of(text[i]);
final ScriptData sd = scripts.get(block);
@ -257,7 +257,7 @@ public class IndicNormalizer {
/** Compose into standard form any compositions in the decompositions table. */
private int compose(
int ch0, Character.UnicodeBlock block0, ScriptData sd, char text[], int pos, int len) {
int ch0, Character.UnicodeBlock block0, ScriptData sd, char[] text, int pos, int len) {
if (pos + 1 >= len) /* need at least 2 chars! */ return len;
final int ch1 = text[pos + 1] - sd.base;


@ -59,7 +59,7 @@ package org.apache.lucene.analysis.it;
*/
public class ItalianLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 6) return len;
for (int i = 0; i < len; i++)


@ -34,7 +34,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class LatvianStemmer {
/** Stem a latvian word. returns the new adjusted length. */
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
int numVowels = numVowels(s, len);
for (int i = 0; i < affixes.length; i++) {
@ -48,7 +48,7 @@ public class LatvianStemmer {
return len;
}
static final Affix affixes[] = {
static final Affix[] affixes = {
new Affix("ajiem", 3, false), new Affix("ajai", 3, false),
new Affix("ajam", 2, false), new Affix("ajām", 2, false),
new Affix("ajos", 2, false), new Affix("ajās", 2, false),
@ -71,7 +71,7 @@ public class LatvianStemmer {
};
static class Affix {
char affix[]; // suffix
char[] affix; // suffix
int vc; // vowel count of the suffix
boolean palatalizes; // true if we should fire palatalization rules.
@ -92,7 +92,7 @@ public class LatvianStemmer {
* <li>z -&gt; ž
* </ul>
*/
private int unpalatalize(char s[], int len) {
private int unpalatalize(char[] s, int len) {
// we check the character removed: if it's -u then
// it's 2,5, or 6 gen pl., and these two can only apply then.
if (s[len] == 'u') {
@ -160,7 +160,7 @@ public class LatvianStemmer {
* Count the vowels in the string, we always require at least one in the remaining stem to accept
* it.
*/
private int numVowels(char s[], int len) {
private int numVowels(char[] s, int len) {
int n = 0;
for (int i = 0; i < len; i++) {
switch (s[i]) {


@ -188,7 +188,7 @@ public final class ASCIIFoldingFilter extends TokenFilter {
* @lucene.internal
*/
public static final int foldToASCII(
char input[], int inputPos, char output[], int outputPos, int length) {
char[] input, int inputPos, char[] output, int outputPos, int length) {
final int end = inputPos + length;
for (int pos = inputPos; pos < end; ++pos) {
final char c = input[pos];


@ -88,7 +88,7 @@ public class FingerprintFilter extends TokenFilter {
private final boolean buildSingleOutputToken() throws IOException {
inputEnded = false;
char clonedLastTerm[] = null;
char[] clonedLastTerm = null;
uniqueTerms = new CharArraySet(8, false);
int outputTokenSize = 0;
while (input.incrementToken()) {
@ -96,7 +96,7 @@ public class FingerprintFilter extends TokenFilter {
continue;
}
final char term[] = termAttribute.buffer();
final char[] term = termAttribute.buffer();
final int length = termAttribute.length();
if (!uniqueTerms.contains(term, 0, length)) {
@ -150,8 +150,8 @@ public class FingerprintFilter extends TokenFilter {
new Comparator<Object>() {
@Override
public int compare(Object o1, Object o2) {
char v1[] = (char[]) o1;
char v2[] = (char[]) o2;
char[] v1 = (char[]) o1;
char[] v2 = (char[]) o2;
int len1 = v1.length;
int len2 = v2.length;
int lim = Math.min(len1, len2);


@ -121,7 +121,7 @@ public final class HyphenatedWordsFilter extends TokenFilter {
restoreState(savedState);
savedState = null;
char term[] = termAttribute.buffer();
char[] term = termAttribute.buffer();
int length = hyphenated.length();
if (length > termAttribute.length()) {
term = termAttribute.resizeBuffer(length);


@ -47,7 +47,7 @@ public final class RemoveDuplicatesTokenFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
while (input.incrementToken()) {
final char term[] = termAttribute.buffer();
final char[] term = termAttribute.buffer();
final int length = termAttribute.length();
final int posIncrement = posIncAttribute.getPositionIncrement();
@ -58,7 +58,7 @@ public final class RemoveDuplicatesTokenFilter extends TokenFilter {
boolean duplicate = (posIncrement == 0 && previous.contains(term, 0, length));
// clone the term, and add to the set of seen terms.
char saved[] = new char[length];
char[] saved = new char[length];
System.arraycopy(term, 0, saved, 0, length);
previous.add(saved);


@ -183,7 +183,7 @@ public final class WordDelimiterFilter extends TokenFilter {
// used for accumulating position increment gaps
private int accumPosInc = 0;
private char savedBuffer[] = new char[1024];
private char[] savedBuffer = new char[1024];
private int savedStartOffset;
private int savedEndOffset;
private String savedType;
@ -377,9 +377,9 @@ public final class WordDelimiterFilter extends TokenFilter {
first = true;
}
private AttributeSource.State buffered[] = new AttributeSource.State[8];
private int startOff[] = new int[8];
private int posInc[] = new int[8];
private AttributeSource.State[] buffered = new AttributeSource.State[8];
private int[] startOff = new int[8];
private int[] posInc = new int[8];
private int bufferedLen = 0;
private int bufferedPos = 0;
private boolean first;
@ -616,7 +616,7 @@ public final class WordDelimiterFilter extends TokenFilter {
* @param offset Offset in the concetenation to add the text
* @param length Length of the text to append
*/
void append(char text[], int offset, int length) {
void append(char[] text, int offset, int length) {
buffer.append(text, offset, length);
subwordCount++;
}
@ -627,7 +627,7 @@ public final class WordDelimiterFilter extends TokenFilter {
if (termAttribute.length() < buffer.length()) {
termAttribute.resizeBuffer(buffer.length());
}
char termbuffer[] = termAttribute.buffer();
char[] termbuffer = termAttribute.buffer();
buffer.getChars(0, buffer.length(), termbuffer, 0);
termAttribute.setLength(buffer.length());


@ -173,7 +173,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
}
// ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
byte types[] =
byte[] types =
new byte
[Math.max(
typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];


@ -608,7 +608,7 @@ public final class WordDelimiterGraphFilter extends TokenFilter {
* @param offset Offset in the concetenation to add the text
* @param length Length of the text to append
*/
void append(char text[], int offset, int length) {
void append(char[] text, int offset, int length) {
buffer.append(text, offset, length);
subwordCount++;
}


@ -160,7 +160,7 @@ public class WordDelimiterGraphFilterFactory extends TokenFilterFactory
}
// ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
byte types[] =
byte[] types =
new byte
[Math.max(
typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];


@ -40,7 +40,7 @@ public final class WordDelimiterIterator {
public static final byte[] DEFAULT_WORD_DELIM_TABLE;
char text[];
char[] text;
int length;
/** start position of text, excluding leading delimiters */
@ -207,7 +207,7 @@ public final class WordDelimiterIterator {
* @param text New text
* @param length length of the text
*/
void setText(char text[], int length) {
void setText(char[] text, int length) {
this.text = text;
this.length = this.endBounds = length;
current = startBounds = end = 0;


@ -82,7 +82,7 @@ public class NorwegianLightStemmer {
useNynorsk = (flags & NYNORSK) != 0;
}
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
// Remove posessive -s (bilens -> bilen) and continue checking
if (len > 4 && s[len - 1] == 's') len--;


@ -78,7 +78,7 @@ public class NorwegianMinimalStemmer {
useNynorsk = (flags & NYNORSK) != 0;
}
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
// Remove genitiv s
if (len > 4 && s[len - 1] == 's') len--;


@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class PortugueseLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len < 4) return len;
len = removeSuffix(s, len);
@ -119,7 +119,7 @@ public class PortugueseLightStemmer {
return len;
}
private int removeSuffix(char s[], int len) {
private int removeSuffix(char[] s, int len) {
if (len > 4 && endsWith(s, len, "es"))
switch (s[len - 3]) {
case 'r':
@ -169,7 +169,7 @@ public class PortugueseLightStemmer {
return len;
}
private int normFeminine(char s[], int len) {
private int normFeminine(char[] s, int len) {
if (len > 7
&& (endsWith(s, len, "inha") || endsWith(s, len, "iaca") || endsWith(s, len, "eira"))) {
s[len - 1] = 'o';


@ -31,7 +31,7 @@ public class PortugueseMinimalStemmer extends RSLPStemmerBase {
private static final Step pluralStep =
parse(PortugueseMinimalStemmer.class, "portuguese.rslp").get("Plural");
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
return pluralStep.apply(s, len);
}
}


@ -43,7 +43,7 @@ public class PortugueseStemmer extends RSLPStemmerBase {
* @param len initial valid length of buffer
* @return new valid length, stemmed
*/
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
assert s.length >= len + 1 : "this stemmer requires an oversized array of at least 1";
len = plural.apply(s, len);


@ -94,8 +94,8 @@ public abstract class RSLPStemmerBase {
/** A basic rule, with no exceptions. */
protected static class Rule {
protected final char suffix[];
protected final char replacement[];
protected final char[] suffix;
protected final char[] replacement;
protected final int min;
/**
@ -112,12 +112,12 @@ public abstract class RSLPStemmerBase {
}
/** @return true if the word matches this rule. */
public boolean matches(char s[], int len) {
public boolean matches(char[] s, int len) {
return (len - suffix.length >= min && endsWith(s, len, suffix));
}
/** @return new valid length of the string after firing this rule. */
public int replace(char s[], int len) {
public int replace(char[] s, int len) {
if (replacement.length > 0) {
System.arraycopy(replacement, 0, s, len - suffix.length, replacement.length);
}
@ -140,7 +140,7 @@ public abstract class RSLPStemmerBase {
}
@Override
public boolean matches(char s[], int len) {
public boolean matches(char[] s, int len) {
return super.matches(s, len) && !exceptions.contains(s, 0, len);
}
}
@ -167,7 +167,7 @@ public abstract class RSLPStemmerBase {
}
@Override
public boolean matches(char s[], int len) {
public boolean matches(char[] s, int len) {
if (!super.matches(s, len)) return false;
for (int i = 0; i < exceptions.length; i++) if (endsWith(s, len, exceptions[i])) return false;
@ -179,7 +179,7 @@ public abstract class RSLPStemmerBase {
/** A step containing a list of rules. */
protected static class Step {
protected final String name;
protected final Rule rules[];
protected final Rule[] rules;
protected final int min;
protected final char[][] suffixes;
@ -191,7 +191,7 @@ public abstract class RSLPStemmerBase {
* @param min minimum word size. if this is 0 it is automatically calculated.
* @param suffixes optional list of conditional suffixes. may be null.
*/
public Step(String name, Rule rules[], int min, String suffixes[]) {
public Step(String name, Rule[] rules, int min, String[] suffixes) {
this.name = name;
this.rules = rules;
if (min == 0) {
@ -209,7 +209,7 @@ public abstract class RSLPStemmerBase {
}
/** @return new valid length of the string after applying the entire step. */
public int apply(char s[], int len) {
public int apply(char[] s, int len) {
if (len < min) return len;
if (suffixes != null) {
@ -275,8 +275,8 @@ public abstract class RSLPStemmerBase {
String name = matcher.group(1);
int min = Integer.parseInt(matcher.group(2));
int type = Integer.parseInt(matcher.group(3));
String suffixes[] = parseList(matcher.group(4));
Rule rules[] = parseRules(r, type);
String[] suffixes = parseList(matcher.group(4));
Rule[] rules = parseRules(r, type);
return new Step(name, rules, min, suffixes);
}
@ -322,7 +322,7 @@ public abstract class RSLPStemmerBase {
private static String[] parseList(String s) {
if (s.length() == 0) return null;
String list[] = s.split(",");
String[] list = s.split(",");
for (int i = 0; i < list.length; i++) list[i] = parseString(list[i].trim());
return list;
}


@ -61,12 +61,12 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class RussianLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
len = removeCase(s, len);
return normalize(s, len);
}
private int normalize(char s[], int len) {
private int normalize(char[] s, int len) {
if (len > 3)
switch (s[len - 1]) {
case 'ь':
@ -78,7 +78,7 @@ public class RussianLightStemmer {
return len;
}
private int removeCase(char s[], int len) {
private int removeCase(char[] s, int len) {
if (len > 6 && (endsWith(s, len, "иями") || endsWith(s, len, "оями"))) return len - 4;
if (len > 5


@ -85,11 +85,11 @@ public final class SnowballFilter extends TokenFilter {
public final boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if (!keywordAttr.isKeyword()) {
char termBuffer[] = termAtt.buffer();
char[] termBuffer = termAtt.buffer();
final int length = termAtt.length();
stemmer.setCurrent(termBuffer, length);
stemmer.stem();
final char finalTerm[] = stemmer.getCurrentBuffer();
final char[] finalTerm = stemmer.getCurrentBuffer();
final int newLength = stemmer.getCurrentBufferLength();
if (finalTerm != termBuffer) termAtt.copyBuffer(finalTerm, 0, newLength);
else termAtt.setLength(newLength);


@ -40,7 +40,7 @@ public final class SerbianNormalizationFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
char buffer[] = termAtt.buffer();
char[] buffer = termAtt.buffer();
int length = termAtt.length();
for (int i = 0; i < length; i++) {
final char c = buffer[i];


@ -37,7 +37,7 @@ public final class SerbianNormalizationRegularFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
char buffer[] = termAtt.buffer();
char[] buffer = termAtt.buffer();
int length = termAtt.length();
for (int i = 0; i < length; i++) {
final char c = buffer[i];


@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
*/
public class SwedishLightStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len > 4 && s[len - 1] == 's') len--;
if (len > 7 && (endsWith(s, len, "elser") || endsWith(s, len, "heten"))) return len - 5;


@ -62,7 +62,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.endsWith;
*/
public class SwedishMinimalStemmer {
public int stem(char s[], int len) {
public int stem(char[] s, int len) {
if (len > 4 && s[len - 1] == 's') len--;
if (len > 6


@@ -87,19 +87,19 @@ public class SolrSynonymParser extends SynonymMap.Parser {
}
// TODO: we could process this more efficiently.
- String sides[] = split(line, "=>");
+ String[] sides = split(line, "=>");
if (sides.length > 1) { // explicit mapping
if (sides.length != 2) {
throw new IllegalArgumentException(
"more than one explicit mapping specified on the same line");
}
- String inputStrings[] = split(sides[0], ",");
+ String[] inputStrings = split(sides[0], ",");
CharsRef[] inputs = new CharsRef[inputStrings.length];
for (int i = 0; i < inputs.length; i++) {
inputs[i] = analyze(unescape(inputStrings[i]).trim(), new CharsRefBuilder());
}
- String outputStrings[] = split(sides[1], ",");
+ String[] outputStrings = split(sides[1], ",");
CharsRef[] outputs = new CharsRef[outputStrings.length];
for (int i = 0; i < outputs.length; i++) {
outputs[i] = analyze(unescape(outputStrings[i]).trim(), new CharsRefBuilder());
@@ -111,7 +111,7 @@ public class SolrSynonymParser extends SynonymMap.Parser {
}
}
} else {
- String inputStrings[] = split(line, ",");
+ String[] inputStrings = split(line, ",");
CharsRef[] inputs = new CharsRef[inputStrings.length];
for (int i = 0; i < inputs.length; i++) {
inputs[i] = analyze(unescape(inputStrings[i]).trim(), new CharsRefBuilder());

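For context on the SolrSynonymParser hunk: a line containing "=>" is an explicit mapping, anything else is a comma-separated equivalence set. A stripped-down sketch of just that dispatch (the real parser also handles escapes and runs each term through the analyzer):

import java.util.Arrays;

public class SynonymLineDemo {
  static void parse(String line) {
    String[] sides = line.split("=>");
    if (sides.length > 1) {
      System.out.println(Arrays.toString(sides[0].split(","))
          + " maps to " + Arrays.toString(sides[1].split(",")));
    } else {
      System.out.println("equivalence set: " + Arrays.toString(line.split(",")));
    }
  }
  public static void main(String[] args) {
    parse("i-pod, ipod => ipod");
    parse("fast, quick, speedy");
  }
}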

@@ -235,7 +235,7 @@ public class SynonymMap {
final byte[] spare = new byte[5];
Set<CharsRef> keys = workingSet.keySet();
- CharsRef sortedKeys[] = keys.toArray(new CharsRef[keys.size()]);
+ CharsRef[] sortedKeys = keys.toArray(new CharsRef[keys.size()]);
Arrays.sort(sortedKeys, CharsRef.getUTF16SortedAsUTF8Comparator());
final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();


@@ -47,7 +47,7 @@ public class WordnetSynonymParser extends SynonymMap.Parser {
try {
String line = null;
String lastSynSetID = "";
- CharsRef synset[] = new CharsRef[8];
+ CharsRef[] synset = new CharsRef[8];
int synsetSize = 0;
while ((line = br.readLine()) != null) {
@@ -88,7 +88,7 @@ public class WordnetSynonymParser extends SynonymMap.Parser {
return analyze(text, reuse);
}
- private void addInternal(CharsRef synset[], int size) {
+ private void addInternal(CharsRef[] synset, int size) {
if (size <= 1) {
return; // nothing to do
}


@@ -90,7 +90,7 @@ public final class TurkishLowerCaseFilter extends TokenFilter {
}
/** lookahead for a combining dot above. other NSMs may be in between. */
- private boolean isBeforeDot(char s[], int pos, int len) {
+ private boolean isBeforeDot(char[] s, int pos, int len) {
for (int i = pos; i < len; ) {
final int ch = Character.codePointAt(s, i, len);
if (Character.getType(ch) != Character.NON_SPACING_MARK) return false;
@@ -104,7 +104,7 @@ public final class TurkishLowerCaseFilter extends TokenFilter {
/**
* delete a character in-place. rarely happens, only if COMBINING_DOT_ABOVE is found after an i
*/
- private int delete(char s[], int pos, int len) {
+ private int delete(char[] s, int pos, int len) {
if (pos < len) System.arraycopy(s, pos + 1, s, pos, len - pos - 1);
return len - 1;

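The delete() above is the classic shift-left idiom: move the tail down one slot and report a shorter logical length; the array's capacity never changes. A runnable illustration using the same two lines:

public class DeleteDemo {
  static int delete(char[] s, int pos, int len) {
    if (pos < len) System.arraycopy(s, pos + 1, s, pos, len - pos - 1);
    return len - 1;
  }
  public static void main(String[] args) {
    char[] s = "abXcd".toCharArray();
    int len = delete(s, 2, s.length); // remove the 'X'
    System.out.println(new String(s, 0, len)); // abcd
  }
}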

@@ -26,7 +26,7 @@ import java.util.Locale;
* @lucene.internal
*/
public abstract class CharArrayIterator implements CharacterIterator {
- private char array[];
+ private char[] array;
private int start;
private int index;
private int length;
@@ -51,7 +51,7 @@ public abstract class CharArrayIterator implements CharacterIterator {
* @param start offset into buffer
* @param length maximum length to examine
*/
- public void setText(final char array[], int start, int length) {
+ public void setText(final char[] array, int start, int length) {
this.array = array;
this.start = start;
this.index = start;

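CharArrayIterator exists so a BreakIterator can walk a char[] slice without first materializing a String. The JDK-only sketch below shows the consuming side of that contract, substituting the standard StringCharacterIterator for Lucene's class:

import java.text.BreakIterator;
import java.text.StringCharacterIterator;

public class BreakDemo {
  public static void main(String[] args) {
    BreakIterator words = BreakIterator.getWordInstance();
    words.setText(new StringCharacterIterator("hello break iterator"));
    // walk [start,end) word boundaries until the iterator is exhausted
    for (int start = words.first(), end = words.next();
        end != BreakIterator.DONE;
        start = end, end = words.next()) {
      System.out.println("[" + start + "," + end + ")");
    }
  }
}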

@@ -101,13 +101,13 @@ public class OpenStringBuilder implements Appendable, CharSequence {
unsafeWrite((char) b);
}
- public void unsafeWrite(char b[], int off, int len) {
+ public void unsafeWrite(char[] b, int off, int len) {
System.arraycopy(b, off, buf, this.len, len);
this.len += len;
}
protected void resize(int len) {
- char newbuf[] = new char[Math.max(buf.length << 1, len)];
+ char[] newbuf = new char[Math.max(buf.length << 1, len)];
System.arraycopy(buf, 0, newbuf, 0, size());
buf = newbuf;
}
@@ -131,7 +131,7 @@ public class OpenStringBuilder implements Appendable, CharSequence {
write(b, 0, b.length);
}
- public void write(char b[], int off, int len) {
+ public void write(char[] b, int off, int len) {
reserve(len);
unsafeWrite(b, off, len);
}
@@ -153,7 +153,7 @@ public class OpenStringBuilder implements Appendable, CharSequence {
}
public char[] toCharArray() {
- char newbuf[] = new char[size()];
+ char[] newbuf = new char[size()];
System.arraycopy(buf, 0, newbuf, 0, size());
return newbuf;
}

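resize() above at least doubles the capacity (buf.length << 1), which is what keeps repeated writes amortized O(1). A standalone sketch of just the growth rule:

public class GrowDemo {
  static char[] grow(char[] buf, int minSize) {
    char[] newbuf = new char[Math.max(buf.length << 1, minSize)];
    System.arraycopy(buf, 0, newbuf, 0, buf.length);
    return newbuf;
  }
  public static void main(String[] args) {
    char[] buf = new char[4];
    System.out.println(grow(buf, 6).length);   // 8: doubling dominates
    System.out.println(grow(buf, 100).length); // 100: the request dominates
  }
}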

@@ -37,7 +37,7 @@ import org.apache.lucene.util.AttributeFactory;
*/
public abstract class SegmentingTokenizerBase extends Tokenizer {
protected static final int BUFFERMAX = 1024;
- protected final char buffer[] = new char[BUFFERMAX];
+ protected final char[] buffer = new char[BUFFERMAX];
/** true length of text in the buffer */
private int length = 0;
/** length in buffer that can be evaluated safely, up to a safe end point */


@@ -33,7 +33,7 @@ public class StemmerUtil {
* @param prefix Prefix string to test
* @return true if <code>s</code> starts with <code>prefix</code>
*/
- public static boolean startsWith(char s[], int len, String prefix) {
+ public static boolean startsWith(char[] s, int len, String prefix) {
final int prefixLen = prefix.length();
if (prefixLen > len) return false;
for (int i = 0; i < prefixLen; i++) if (s[i] != prefix.charAt(i)) return false;
@@ -48,7 +48,7 @@ public class StemmerUtil {
* @param suffix Suffix string to test
* @return true if <code>s</code> ends with <code>suffix</code>
*/
- public static boolean endsWith(char s[], int len, String suffix) {
+ public static boolean endsWith(char[] s, int len, String suffix) {
final int suffixLen = suffix.length();
if (suffixLen > len) return false;
for (int i = suffixLen - 1; i >= 0; i--)
@@ -65,7 +65,7 @@ public class StemmerUtil {
* @param suffix Suffix string to test
* @return true if <code>s</code> ends with <code>suffix</code>
*/
- public static boolean endsWith(char s[], int len, char suffix[]) {
+ public static boolean endsWith(char[] s, int len, char[] suffix) {
final int suffixLen = suffix.length;
if (suffixLen > len) return false;
for (int i = suffixLen - 1; i >= 0; i--)
@@ -82,7 +82,7 @@ public class StemmerUtil {
* @param len length of input buffer
* @return length of input buffer after deletion
*/
- public static int delete(char s[], int pos, int len) {
+ public static int delete(char[] s, int pos, int len) {
assert pos < len;
if (pos < len - 1) { // don't arraycopy if asked to delete last character
System.arraycopy(s, pos + 1, s, pos, len - pos - 1);
@@ -99,7 +99,7 @@ public class StemmerUtil {
* @param nChars number of characters to delete
* @return length of input buffer after deletion
*/
- public static int deleteN(char s[], int pos, int len, int nChars) {
+ public static int deleteN(char[] s, int pos, int len, int nChars) {
assert pos + nChars <= len;
if (pos + nChars < len) { // don't arraycopy if asked to delete the last characters
System.arraycopy(s, pos + nChars, s, pos, len - pos - nChars);

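Every StemmerUtil method takes an explicit len because the valid term may be shorter than the backing buffer. A minimal re-implementation demonstrating why the logical length matters (the loop body here is reconstructed, since the hunk only shows the loop's first line):

public class EndsWithDemo {
  static boolean endsWith(char[] s, int len, String suffix) {
    final int suffixLen = suffix.length();
    if (suffixLen > len) return false;
    for (int i = suffixLen - 1; i >= 0; i--)
      if (s[len - (suffixLen - i)] != suffix.charAt(i)) return false;
    return true;
  }
  public static void main(String[] args) {
    char[] buf = "walkingXXXX".toCharArray(); // stale chars beyond the term
    System.out.println(endsWith(buf, 7, "ing"));  // true: only "walking" is live
    System.out.println(endsWith(buf, 11, "ing")); // false: the stale tail is seen
  }
}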

@@ -326,7 +326,7 @@ class WikipediaTokenizerImpl {
private static final int ZZ_PUSHBACK_2BIG = 2;
/* error messages for the codes above */
- private static final String ZZ_ERROR_MSG[] = {
+ private static final String[] ZZ_ERROR_MSG = {
"Unknown internal scanner error",
"Error: could not match input",
"Error: pushback value was too large"
@@ -377,7 +377,7 @@ class WikipediaTokenizerImpl {
/**
* this buffer contains the current text to be matched and is the source of the yytext() string
*/
- private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
+ private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
/** the textposition at the last accepting state */
private int zzMarkedPos;
@@ -537,7 +537,7 @@ class WikipediaTokenizerImpl {
/* is the buffer big enough? */
if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) {
/* if not: blow it up */
- char newBuffer[] = new char[zzBuffer.length * 2];
+ char[] newBuffer = new char[zzBuffer.length * 2];
System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
zzBuffer = newBuffer;
zzEndRead += zzFinalHighSurrogate;


@@ -65,7 +65,7 @@ public class SnowballProgram implements Serializable {
* @param text character array containing input
* @param length valid length of text.
*/
- public void setCurrent(char text[], int length) {
+ public void setCurrent(char[] text, int length) {
current = text;
cursor = 0;
limit = length;
@@ -101,7 +101,7 @@ public class SnowballProgram implements Serializable {
}
// current string
- private char current[];
+ private char[] current;
protected int cursor;
protected int limit;
@@ -346,7 +346,7 @@ public class SnowballProgram implements Serializable {
final int newLength = limit + adjustment;
// resize if necessary
if (newLength > current.length) {
- char newBuffer[] = new char[oversize(newLength)];
+ char[] newBuffer = new char[oversize(newLength)];
System.arraycopy(current, 0, newBuffer, 0, limit);
current = newBuffer;
}

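Unlike the JFlex buffer growth above (a plain doubling), SnowballProgram grows through oversize(newLength), which over-allocates beyond the requested size so a run of small insertions doesn't reallocate on every step. A sketch with an invented growth formula (Lucene's actual oversize() in ArrayUtil is more elaborate):

public class OversizeDemo {
  static int oversize(int minSize) {
    return minSize + (minSize >> 3) + 3; // invented heuristic: ~12.5% headroom
  }
  public static void main(String[] args) {
    char[] current = new char[16];
    int limit = 16;
    int newLength = limit + 10; // pretend a replacement grew the string
    if (newLength > current.length) {
      char[] newBuffer = new char[oversize(newLength)];
      System.arraycopy(current, 0, newBuffer, 0, limit);
      current = newBuffer;
    }
    System.out.println(current.length); // 32 under this heuristic
  }
}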

@@ -200,7 +200,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- Analyzer analyzers[] =
+ Analyzer[] analyzers =
new Analyzer[] {
new WhitespaceAnalyzer(),
new SimpleAnalyzer(),
@@ -215,7 +215,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
- Analyzer analyzers[] =
+ Analyzer[] analyzers =
new Analyzer[] {
new WhitespaceAnalyzer(),
new SimpleAnalyzer(),


@@ -283,7 +283,7 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
new HashSet<>(
Arrays.asList("rrdpafa", "pupmmlu", "xlq", "dyy", "zqrxrrck", "o", "hsrlfvcha")),
false);
- final byte table[] =
+ final byte[] table =
new byte[] {
-57, 26, 1, 48, 63, -23, 55, -84, 18, 120, -97, 103, 58, 13, 84, 89, 57, -13, -63, 5, 28,
97, -54, -94, 102, -108, -5, 5, 46, 40, 43, 78, 43, -72, 36, 29, 124, -106, -22, -51, 65,


@@ -379,7 +379,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
put(
byte[].class,
random -> {
- byte bytes[] = new byte[random.nextInt(256)];
+ byte[] bytes = new byte[random.nextInt(256)];
random.nextBytes(bytes);
return bytes;
});
@@ -802,7 +802,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
final Constructor<? extends Tokenizer> ctor =
tokenizers.get(random.nextInt(tokenizers.size()));
final StringBuilder descr = new StringBuilder();
- final Object args[] = newTokenizerArgs(random, ctor.getParameterTypes());
+ final Object[] args = newTokenizerArgs(random, ctor.getParameterTypes());
if (broken(ctor, args)) {
continue;
}
@@ -823,7 +823,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
while (true) {
final Constructor<? extends CharFilter> ctor =
charfilters.get(random.nextInt(charfilters.size()));
- final Object args[] = newCharFilterArgs(random, spec.reader, ctor.getParameterTypes());
+ final Object[] args = newCharFilterArgs(random, spec.reader, ctor.getParameterTypes());
if (broken(ctor, args)) {
continue;
}
@@ -860,7 +860,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
new ConditionalTokenFilter(
spec.stream,
in -> {
- final Object args[] = newFilterArgs(random, in, ctor.getParameterTypes());
+ final Object[] args = newFilterArgs(random, in, ctor.getParameterTypes());
if (broken(ctor, args)) {
return in;
}
@@ -885,7 +885,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
};
break;
} else {
- final Object args[] = newFilterArgs(random, spec.stream, ctor.getParameterTypes());
+ final Object[] args = newFilterArgs(random, spec.stream, ctor.getParameterTypes());
if (broken(ctor, args)) {
continue;
}

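TestRandomChains builds analyzer chains reflectively: pick a constructor, synthesize an Object[] matching its parameter types, and skip combinations known to be broken. A self-contained miniature of that loop, using StringBuilder's public int-arg constructor as the target:

import java.lang.reflect.Constructor;
import java.util.Random;

public class ReflectiveCtorDemo {
  public static void main(String[] args) throws Exception {
    Random random = new Random();
    Constructor<StringBuilder> ctor = StringBuilder.class.getConstructor(int.class);
    Object[] ctorArgs = new Object[] {1 + random.nextInt(256)}; // one int: capacity
    StringBuilder sb = ctor.newInstance(ctorArgs);
    System.out.println("built with capacity " + sb.capacity());
  }
}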

@@ -86,7 +86,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
CharArraySet stopWordsSet = new CharArraySet(asSet("good", "test", "analyzer"), false);
StopAnalyzer newStop = new StopAnalyzer(stopWordsSet);
String s = "This is a good test of the english stop analyzer with positions";
- int expectedIncr[] = {1, 1, 1, 3, 1, 1, 1, 2, 1};
+ int[] expectedIncr = {1, 1, 1, 3, 1, 1, 1, 2, 1};
try (TokenStream stream = newStop.tokenStream("test", s)) {
assertNotNull(stream);
int i = 0;


@@ -51,7 +51,7 @@ public class TestTypeTokenFilter extends BaseTokenStreamTestCase {
}
}
log(sb.toString());
- String stopTypes[] = new String[] {"<NUM>"};
+ String[] stopTypes = new String[] {"<NUM>"};
Set<String> stopSet = asSet(stopTypes);
// with increments


@@ -39,7 +39,7 @@ public class TestUAX29URLEmailAnalyzer extends BaseTokenStreamTestCase {
public void testHugeDoc() throws IOException {
StringBuilder sb = new StringBuilder();
- char whitespace[] = new char[4094];
+ char[] whitespace = new char[4094];
Arrays.fill(whitespace, ' ');
sb.append(whitespace);
sb.append("testing 1234");


@@ -81,7 +81,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
public void testHugeDoc() throws IOException {
StringBuilder sb = new StringBuilder();
- char whitespace[] = new char[4094];
+ char[] whitespace = new char[4094];
Arrays.fill(whitespace, ' ');
sb.append(whitespace);
sb.append("testing 1234");


@@ -192,7 +192,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase {
static void assertCapitalizesTo(
Tokenizer tokenizer,
- String expected[],
+ String[] expected,
boolean onlyFirstWord,
CharArraySet keep,
boolean forceFirstLetter,
@@ -216,7 +216,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase {
static void assertCapitalizesTo(
String input,
- String expected[],
+ String[] expected,
boolean onlyFirstWord,
CharArraySet keep,
boolean forceFirstLetter,


@@ -30,10 +30,10 @@ import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
public class TestQueryAutoStopWordAnalyzer extends BaseTokenStreamTestCase {
- String variedFieldValues[] = {
+ String[] variedFieldValues = {
"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"
};
- String repetitiveFieldValues[] = {"boring", "boring", "vaguelyboring"};
+ String[] repetitiveFieldValues = {"boring", "boring", "vaguelyboring"};
Directory dir;
Analyzer appAnalyzer;
IndexReader reader;


@@ -1194,9 +1194,9 @@ public class TestShingleFilter extends BaseTokenStreamTestCase {
protected void shingleFilterTestCommon(
ShingleFilter filter, Token[] tokensToCompare, int[] positionIncrements, String[] types)
throws IOException {
- String text[] = new String[tokensToCompare.length];
- int startOffsets[] = new int[tokensToCompare.length];
- int endOffsets[] = new int[tokensToCompare.length];
+ String[] text = new String[tokensToCompare.length];
+ int[] startOffsets = new int[tokensToCompare.length];
+ int[] endOffsets = new int[tokensToCompare.length];
for (int i = 0; i < tokensToCompare.length; i++) {
text[i] = new String(tokensToCompare[i].buffer(), 0, tokensToCompare[i].length());


@@ -33,7 +33,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
BreakIterator bi = BreakIterator.getWordInstance(Locale.getDefault());
CharArrayIterator ci = CharArrayIterator.newWordInstance();
for (int i = 0; i < 10000; i++) {
- char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
+ char[] text = TestUtil.randomUnicodeString(random()).toCharArray();
ci.setText(text, 0, text.length);
consume(bi, ci);
}
@@ -45,7 +45,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
BreakIterator bi = BreakIterator.getWordInstance(Locale.getDefault());
Segment ci = new Segment();
for (int i = 0; i < 10000; i++) {
- char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+ char[] text = _TestUtil.randomUnicodeString(random).toCharArray();
ci.array = text;
ci.offset = 0;
ci.count = text.length;
@@ -63,7 +63,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
CharArrayIterator ci = CharArrayIterator.newSentenceInstance();
for (int i = 0; i < 10000; i++) {
- char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
+ char[] text = TestUtil.randomUnicodeString(random()).toCharArray();
ci.setText(text, 0, text.length);
consume(bi, ci);
}
@@ -75,7 +75,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
Segment ci = new Segment();
for (int i = 0; i < 10000; i++) {
- char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+ char[] text = _TestUtil.randomUnicodeString(random).toCharArray();
ci.array = text;
ci.offset = 0;
ci.count = text.length;
@@ -145,7 +145,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
});
// clone()
- char text[] = "testing".toCharArray();
+ char[] text = "testing".toCharArray();
ci.setText(text, 0, text.length);
ci.next();
CharArrayIterator ci2 = ci.clone();


@@ -116,7 +116,7 @@ public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase {
/** Tests terms which span across boundaries */
public void testHugeDoc() throws IOException {
StringBuilder sb = new StringBuilder();
- char whitespace[] = new char[4094];
+ char[] whitespace = new char[4094];
Arrays.fill(whitespace, '\n');
sb.append(whitespace);
sb.append("testing 1234");
@@ -131,10 +131,10 @@ public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase {
sb.append('a');
}
String input = sb.toString();
- char token[] = new char[1024];
+ char[] token = new char[1024];
Arrays.fill(token, 'a');
String expectedToken = new String(token);
- String expected[] = {
+ String[] expected = {
expectedToken, expectedToken, expectedToken,
expectedToken, expectedToken, expectedToken,
expectedToken, expectedToken, expectedToken,


@@ -413,7 +413,7 @@ public class TestWikipediaTokenizer extends BaseTokenStreamTestCase {
// now check the flags, TODO: add way to check flags from BaseTokenStreamTestCase?
tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
tf.setReader(new StringReader(test));
- int expectedFlags[] =
+ int[] expectedFlags =
new int[] {
UNTOKENIZED_TOKEN_FLAG,
0,


@@ -113,7 +113,7 @@ public final class ICUTransformFilter extends TokenFilter {
/** Wrap a {@link CharTermAttribute} with the Replaceable API. */
static final class ReplaceableTermAttribute implements Replaceable {
- private char buffer[];
+ private char[] buffer;
private int length;
private CharTermAttribute token;
@@ -135,7 +135,7 @@ public final class ICUTransformFilter extends TokenFilter {
@Override
public void copy(int start, int limit, int dest) {
- char text[] = new char[limit - start];
+ char[] text = new char[limit - start];
getChars(start, limit, text, 0);
replace(dest, dest, text, 0, limit - start);
}


@@ -30,7 +30,7 @@ import com.ibm.icu.text.UnicodeSet;
final class BreakIteratorWrapper {
private final CharArrayIterator textIterator = new CharArrayIterator();
private final RuleBasedBreakIterator rbbi;
- private char text[];
+ private char[] text;
private int start;
private int status;
@@ -90,7 +90,7 @@ final class BreakIteratorWrapper {
return false;
}
- void setText(char text[], int start, int length) {
+ void setText(char[] text, int start, int length) {
this.text = text;
this.start = start;
textIterator.setText(text, start, length);


@@ -24,7 +24,7 @@ import java.text.CharacterIterator;
* @lucene.experimental
*/
final class CharArrayIterator implements CharacterIterator {
- private char array[];
+ private char[] array;
private int start;
private int index;
private int length;
@@ -49,7 +49,7 @@ final class CharArrayIterator implements CharacterIterator {
* @param start offset into buffer
* @param length maximum length to examine
*/
- void setText(final char array[], int start, int length) {
+ void setText(final char[] array, int start, int length) {
this.array = array;
this.start = start;
this.index = start;


@@ -39,13 +39,13 @@ import com.ibm.icu.text.BreakIterator;
*/
final class CompositeBreakIterator {
private final ICUTokenizerConfig config;
- private final BreakIteratorWrapper wordBreakers[] =
+ private final BreakIteratorWrapper[] wordBreakers =
new BreakIteratorWrapper[1 + UCharacter.getIntPropertyMaxValue(UProperty.SCRIPT)];
private BreakIteratorWrapper rbbi;
private final ScriptIterator scriptIterator;
- private char text[];
+ private char[] text;
CompositeBreakIterator(ICUTokenizerConfig config) {
this.config = config;
@@ -111,7 +111,7 @@ final class CompositeBreakIterator {
* @param start offset into buffer
* @param length maximum length to examine
*/
- void setText(final char text[], int start, int length) {
+ void setText(final char[] text, int start, int length) {
this.text = text;
scriptIterator.setText(text, start, length);
if (scriptIterator.next()) {

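CompositeBreakIterator sizes its wordBreakers array by the maximum script property value so each script can get its own, possibly tailored, breaker; the dispatch key is the script of each codepoint, which ICU4J exposes directly. A small sketch of that lookup, assuming icu4j is on the classpath:

import com.ibm.icu.lang.UScript;

public class ScriptDemo {
  public static void main(String[] args) {
    // print the script each distinct codepoint would dispatch on
    "Lucene Люсин".codePoints().distinct().forEach(cp ->
        System.out.println(new String(Character.toChars(cp)) + " -> "
            + UScript.getName(UScript.getScript(cp))));
  }
}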

@@ -39,7 +39,7 @@ import org.apache.lucene.util.AttributeFactory;
*/
public final class ICUTokenizer extends Tokenizer {
private static final int IOBUFFER = 4096;
- private final char buffer[] = new char[IOBUFFER];
+ private final char[] buffer = new char[IOBUFFER];
/** true length of text in the buffer */
private int length = 0;
/** length in buffer that can be evaluated safely, up to a safe end point */


@@ -111,7 +111,7 @@ public class ICUTokenizerFactory extends TokenizerFactory implements ResourceLoa
if (tailored.isEmpty()) {
config = new DefaultICUTokenizerConfig(cjkAsWords, myanmarAsWords);
} else {
- final BreakIterator breakers[] =
+ final BreakIterator[] breakers =
new BreakIterator[1 + UCharacter.getIntPropertyMaxValue(UProperty.SCRIPT)];
for (Map.Entry<Integer, String> entry : tailored.entrySet()) {
int code = entry.getKey();
