diff --git a/gradle/generation/jflex/skeleton.default.txt b/gradle/generation/jflex/skeleton.default.txt index 2eaa2916a56..ebce03e3563 100644 --- a/gradle/generation/jflex/skeleton.default.txt +++ b/gradle/generation/jflex/skeleton.default.txt @@ -14,7 +14,7 @@ private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -32,7 +32,7 @@ /** this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; @@ -110,7 +110,7 @@ /* is the buffer big enough? */ if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) { /* if not: blow it up */ - char newBuffer[] = new char[zzBuffer.length*2]; + char[] newBuffer = new char[zzBuffer.length*2]; System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length); zzBuffer = newBuffer; zzEndRead += zzFinalHighSurrogate; diff --git a/gradle/generation/jflex/skeleton.disable.buffer.expansion.txt b/gradle/generation/jflex/skeleton.disable.buffer.expansion.txt index 67032d659ff..281c5d20e71 100644 --- a/gradle/generation/jflex/skeleton.disable.buffer.expansion.txt +++ b/gradle/generation/jflex/skeleton.disable.buffer.expansion.txt @@ -14,7 +14,7 @@ private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -32,7 +32,7 @@ /** this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; diff --git a/gradle/generation/snowball/snowball.patch b/gradle/generation/snowball/snowball.patch index bd92a1cfd6e..c72722d1e4b 100644 --- a/gradle/generation/snowball/snowball.patch +++ b/gradle/generation/snowball/snowball.patch @@ -740,7 +740,7 @@ index 1b27b96..94f2d4b 100644 - public String getCurrent() - { - return current.toString(); -+ public void setCurrent(char text[], int length) { ++ public void setCurrent(char[] text, int length) { + current = text; + cursor = 0; + limit = length; @@ -778,7 +778,7 @@ index 1b27b96..94f2d4b 100644 // current string - protected StringBuilder current; -+ private char current[]; ++ private char[] current; protected int cursor; protected int limit; @@ -926,7 +926,7 @@ index 1b27b96..94f2d4b 100644 + final int newLength = limit + adjustment; + //resize if necessary + if (newLength > current.length) { -+ char newBuffer[] = new char[oversize(newLength)]; ++ char[] newBuffer = new char[oversize(newLength)]; + System.arraycopy(current, 0, newBuffer, 0, limit); + current = newBuffer; + } diff --git a/lucene/analysis/common/src/generated/checksums/generateClassicTokenizer.json b/lucene/analysis/common/src/generated/checksums/generateClassicTokenizer.json index 2ee9a3e1993..2a24e6928f7 100644 --- a/lucene/analysis/common/src/generated/checksums/generateClassicTokenizer.json +++ b/lucene/analysis/common/src/generated/checksums/generateClassicTokenizer.json @@ 
-1,5 +1,5 @@ { - "gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1", - "lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java": "21c2cf7ba0a0cdeb43ebe624101e259c9348f6b0", + "gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2", + "lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java": "50f43f43859e63a5470f3c8249cad3ea9c131dc0", "lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex": "958b028ef3f0aec36488fb2bb033cdec5858035f" } \ No newline at end of file diff --git a/lucene/analysis/common/src/generated/checksums/generateHTMLStripCharFilter.json b/lucene/analysis/common/src/generated/checksums/generateHTMLStripCharFilter.json index 152d5fd9107..f5c81131b07 100644 --- a/lucene/analysis/common/src/generated/checksums/generateHTMLStripCharFilter.json +++ b/lucene/analysis/common/src/generated/checksums/generateHTMLStripCharFilter.json @@ -1,6 +1,6 @@ { - "gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1", + "gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2", "lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex": "d1aa75b9b37646efe31731394f84a063eb7eed9d", - "lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java": "78f5208455706d60a9ce4b63624ed04b0fd32573", - "lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex": "71760e2f7abe078109545a0c68aeac9125508d7c" + "lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java": "8470ed427633f58905a8269c78927d7794451e55", + "lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex": "44a271b04ad1564284982be166553584d38b5ea0" } \ No newline at end of file diff --git a/lucene/analysis/common/src/generated/checksums/generateUAX29URLEmailTokenizer.json b/lucene/analysis/common/src/generated/checksums/generateUAX29URLEmailTokenizer.json index 0affd5df6a7..30762f21072 100644 --- a/lucene/analysis/common/src/generated/checksums/generateUAX29URLEmailTokenizer.json +++ b/lucene/analysis/common/src/generated/checksums/generateUAX29URLEmailTokenizer.json @@ -1,7 +1,7 @@ { - "gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "68263ff0a014904c6e89b040d868d8f399408908", + "gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "1424f4df33c977bb150d7377c3bd61f819113091", "lucene/analysis/common/src/java/org/apache/lucene/analysis/email/ASCIITLD.jflex": "bb3878ea10f85f124a0a9e4ea614d3400d664dae", - "lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java": "2bf3efe1a1bc473eb3fe2456f50521ecd7d9b03b", + "lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java": "b88c349d24028f557f2c014437f3f60c968ad9de", "lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex": "56a751d27e481fb55388f91ebf34f5a0cb8cb1b2", "lucene/core/src/data/jflex/UnicodeEmojiProperties.jflex": "7491dd535debc6e9e9ce367c4d3a7217e466dcae" } \ No newline at end of file diff --git a/lucene/analysis/common/src/generated/checksums/generateWikipediaTokenizer.json b/lucene/analysis/common/src/generated/checksums/generateWikipediaTokenizer.json index 63ca19a1bc8..a8c45b8fcb1 100644 --- 
a/lucene/analysis/common/src/generated/checksums/generateWikipediaTokenizer.json +++ b/lucene/analysis/common/src/generated/checksums/generateWikipediaTokenizer.json @@ -1,5 +1,5 @@ { - "gradle/generation/jflex/skeleton.default.txt": "ca1043249c0eefdf2623a785e2b91f5608bfc3f1", - "lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java": "10b391af6953d2f7bcca86da835a1037705509ec", + "gradle/generation/jflex/skeleton.default.txt": "883b32da9ff37f859964af9c2c665361c621b2c2", + "lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java": "743fb4cc4b88d36242b3d227320c85e89a6868a8", "lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex": "a23a4b7cbcdba1fc864c0b85bc2784c8893a0f9f" } \ No newline at end of file diff --git a/lucene/analysis/common/src/generated/checksums/snowball.json b/lucene/analysis/common/src/generated/checksums/snowball.json index b3e42052dba..35224b5228a 100644 --- a/lucene/analysis/common/src/generated/checksums/snowball.json +++ b/lucene/analysis/common/src/generated/checksums/snowball.json @@ -1,6 +1,6 @@ { "lucene/analysis/common/src/java/org/tartarus/snowball/Among.java": "5371973cc30637273366f042e1cff920e0dd14f6", - "lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java": "4e1caa344c7ac864c467ff0e615c1343e911b06b", + "lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java": "93d77707ddc746aad94c1308d2f6f4321a1aa003", "lucene/analysis/common/src/java/org/tartarus/snowball/SnowballStemmer.java": "85bfc728393d7804f86f0def0467a12fd4b82fd3", "lucene/analysis/common/src/java/org/tartarus/snowball/ext/ArabicStemmer.java": "2d43c4606bbaf96d9ac5f8be8ccf28e32164b9f0", "lucene/analysis/common/src/java/org/tartarus/snowball/ext/ArmenianStemmer.java": "0be0949fe1f021ef41e3f9a27280b295ab1e998c", diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizer.java index bea82f6e5d4..aab7c9d1323 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizer.java @@ -63,7 +63,7 @@ public class ArabicNormalizer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int normalize(char s[], int len) { + public int normalize(char[] s, int len) { for (int i = 0; i < len; i++) { switch (s[i]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemmer.java index 5675bcf53f9..625fa7be6af 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemmer.java @@ -43,7 +43,7 @@ public class ArabicStemmer { public static final char WAW = '\u0648'; public static final char YEH = '\u064A'; - public static final char prefixes[][] = { + public static final char[][] prefixes = { ("" + ALEF + LAM).toCharArray(), ("" + WAW + ALEF + LAM).toCharArray(), ("" + BEH + ALEF + LAM).toCharArray(), @@ -53,7 +53,7 @@ public class ArabicStemmer { ("" + WAW).toCharArray(), }; - public static final char suffixes[][] = { + public static final char[][] suffixes = { ("" + HEH + ALEF).toCharArray(), ("" + ALEF + NOON).toCharArray(), ("" + ALEF + 
TEH).toCharArray(), @@ -73,7 +73,7 @@ public class ArabicStemmer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int stem(char s[], int len) { + public int stem(char[] s, int len) { len = stemPrefix(s, len); len = stemSuffix(s, len); @@ -87,7 +87,7 @@ public class ArabicStemmer { * @param len length of input buffer * @return new length of input buffer after stemming. */ - public int stemPrefix(char s[], int len) { + public int stemPrefix(char[] s, int len) { for (int i = 0; i < prefixes.length; i++) if (startsWithCheckLength(s, len, prefixes[i])) return deleteN(s, 0, len, prefixes[i].length); return len; @@ -100,7 +100,7 @@ public class ArabicStemmer { * @param len length of input buffer * @return new length of input buffer after stemming */ - public int stemSuffix(char s[], int len) { + public int stemSuffix(char[] s, int len) { for (int i = 0; i < suffixes.length; i++) if (endsWithCheckLength(s, len, suffixes[i])) len = deleteN(s, len - suffixes[i].length, len, suffixes[i].length); @@ -115,7 +115,7 @@ public class ArabicStemmer { * @param prefix prefix to check * @return true if the prefix matches and can be stemmed */ - boolean startsWithCheckLength(char s[], int len, char prefix[]) { + boolean startsWithCheckLength(char[] s, int len, char[] prefix) { if (prefix.length == 1 && len < 4) { // wa- prefix requires at least 3 characters return false; } else if (len < prefix.length + 2) { // other prefixes require only 2. @@ -139,7 +139,7 @@ public class ArabicStemmer { * @param suffix suffix to check * @return true if the suffix matches and can be stemmed */ - boolean endsWithCheckLength(char s[], int len, char suffix[]) { + boolean endsWithCheckLength(char[] s, int len, char[] suffix) { if (len < suffix.length + 2) { // all suffixes require at least 2 characters after stemming return false; } else { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemmer.java index 5f727f8198d..8723183aebc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemmer.java @@ -33,7 +33,7 @@ public class BulgarianStemmer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int stem(final char s[], int len) { + public int stem(final char[] s, int len) { if (len < 4) // do not stem return len; @@ -76,7 +76,7 @@ public class BulgarianStemmer { * @param len length of input buffer * @return new stemmed length */ - private int removeArticle(final char s[], final int len) { + private int removeArticle(final char[] s, final int len) { if (len > 6 && endsWith(s, len, "ият")) return len - 3; if (len > 5) { @@ -96,7 +96,7 @@ public class BulgarianStemmer { return len; } - private int removePlural(final char s[], final int len) { + private int removePlural(final char[] s, final int len) { if (len > 6) { if (endsWith(s, len, "овци")) return len - 3; // replace with о if (endsWith(s, len, "ове")) return len - 3; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizer.java index c047fbd38f5..e114706936a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizer.java +++ 
b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizer.java @@ -33,7 +33,7 @@ public class BengaliNormalizer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int normalize(char s[], int len) { + public int normalize(char[] s, int len) { for (int i = 0; i < len; i++) { switch (s[i]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemmer.java index e07521cd891..e7f8f03fac1 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemmer.java @@ -28,7 +28,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.endsWith; * http://members.unine.ch/jacques.savoy/clef/BengaliStemmerLight.java.txt */ public class BengaliStemmer { - public int stem(char buffer[], int len) { + public int stem(char[] buffer, int len) { // 8 if (len > 9 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java index b9ce02bb540..0d02ba3b142 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java @@ -28,8 +28,8 @@ import org.apache.lucene.util.ArrayUtil; */ public abstract class BaseCharFilter extends CharFilter { - private int offsets[]; - private int diffs[]; + private int[] offsets; + private int[] diffs; private int size = 0; public BaseCharFilter(Reader in) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java index 699de295ea6..19e92054194 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java @@ -29678,7 +29678,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter { private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -29806,7 +29806,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter { /** this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; @@ -30017,7 +30017,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter { } @Override - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { int i = 0; for ( ; i < len ; ++i) { int ch = read(); @@ -30131,7 +30131,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter { /* is the buffer big enough? 
*/ if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) { /* if not: blow it up */ - char newBuffer[] = new char[zzBuffer.length*2]; + char[] newBuffer = new char[zzBuffer.length*2]; System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length); zzBuffer = newBuffer; zzEndRead += zzFinalHighSurrogate; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex index 1540df656e9..af5e5bb83ff 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex @@ -212,7 +212,7 @@ InlineElment = ( [aAbBiIqQsSuU] | } @Override - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { int i = 0; for ( ; i < len ; ++i) { int ch = read(); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java index abbd5d29c80..2f9337d242f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java @@ -93,9 +93,9 @@ public final class CJKBigramFilter extends TokenFilter { private final PositionLengthAttribute posLengthAtt = addAttribute(PositionLengthAttribute.class); // buffers containing codepoint and offsets in parallel - int buffer[] = new int[8]; - int startOffset[] = new int[8]; - int endOffset[] = new int[8]; + int[] buffer = new int[8]; + int[] startOffset = new int[8]; + int[] endOffset = new int[8]; // length of valid buffer int bufferLen; // current buffer index @@ -264,7 +264,7 @@ public final class CJKBigramFilter extends TokenFilter { index -= last; } - char termBuffer[] = termAtt.buffer(); + char[] termBuffer = termAtt.buffer(); int len = termAtt.length(); int start = offsetAtt.startOffset(); int end = offsetAtt.endOffset(); @@ -300,7 +300,7 @@ public final class CJKBigramFilter extends TokenFilter { */ private void flushBigram() { clearAttributes(); - char termBuffer[] = + char[] termBuffer = termAtt.resizeBuffer(4); // maximum bigram length in code units (2 supplementaries) int len1 = Character.toChars(buffer[index], termBuffer, 0); int len2 = len1 + Character.toChars(buffer[index + 1], termBuffer, len1); @@ -322,7 +322,7 @@ public final class CJKBigramFilter extends TokenFilter { */ private void flushUnigram() { clearAttributes(); - char termBuffer[] = termAtt.resizeBuffer(2); // maximum unigram length (2 surrogates) + char[] termBuffer = termAtt.resizeBuffer(2); // maximum unigram length (2 surrogates) int len = Character.toChars(buffer[index], termBuffer, 0); termAtt.setLength(len); offsetAtt.setOffset(startOffset[index], endOffset[index]); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthCharFilter.java index 16cd6f2800c..3f9d3cfbb05 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthCharFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthCharFilter.java @@ -39,7 +39,7 @@ public class CJKWidthCharFilter extends BaseCharFilter { * as a fallback when they cannot properly combine with 
a preceding * character into a composed form. */ - private static final char KANA_NORM[] = + private static final char[] KANA_NORM = new char[] { 0x30fb, 0x30f2, 0x30a1, 0x30a3, 0x30a5, 0x30a7, 0x30a9, 0x30e3, 0x30e5, 0x30e7, 0x30c3, 0x30fc, 0x30a2, 0x30a4, 0x30a6, 0x30a8, 0x30aa, 0x30ab, @@ -51,7 +51,7 @@ public class CJKWidthCharFilter extends BaseCharFilter { }; /* kana combining diffs: 0x30A6-0x30FD */ - private static final byte KANA_COMBINE_VOICED[] = + private static final byte[] KANA_COMBINE_VOICED = new byte[] { 78, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, @@ -59,7 +59,7 @@ public class CJKWidthCharFilter extends BaseCharFilter { 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }; - private static final byte KANA_COMBINE_SEMI_VOICED[] = + private static final byte[] KANA_COMBINE_SEMI_VOICED = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2, diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilter.java index 9997576642a..18897d95f25 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilter.java @@ -42,7 +42,7 @@ public final class CJKWidthFilter extends TokenFilter { * as a fallback when they cannot properly combine with a preceding * character into a composed form. */ - private static final char KANA_NORM[] = + private static final char[] KANA_NORM = new char[] { 0x30fb, 0x30f2, 0x30a1, 0x30a3, 0x30a5, 0x30a7, 0x30a9, 0x30e3, 0x30e5, 0x30e7, 0x30c3, 0x30fc, 0x30a2, 0x30a4, 0x30a6, 0x30a8, 0x30aa, 0x30ab, @@ -60,7 +60,7 @@ public final class CJKWidthFilter extends TokenFilter { @Override public boolean incrementToken() throws IOException { if (input.incrementToken()) { - char text[] = termAtt.buffer(); + char[] text = termAtt.buffer(); int length = termAtt.length(); for (int i = 0; i < length; i++) { final char ch = text[i]; @@ -84,14 +84,14 @@ public final class CJKWidthFilter extends TokenFilter { } /* kana combining diffs: 0x30A6-0x30FD */ - private static final byte KANA_COMBINE_VOICED[] = + private static final byte[] KANA_COMBINE_VOICED = new byte[] { 78, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }; - private static final byte KANA_COMBINE_HALF_VOICED[] = + private static final byte[] KANA_COMBINE_HALF_VOICED = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2, @@ -100,7 +100,7 @@ public final class CJKWidthFilter extends TokenFilter { }; /** returns true if we successfully combined the voice mark */ - private static boolean combine(char text[], int pos, char ch) { + private static boolean combine(char[] text, int pos, char ch) { final char prev = text[pos - 1]; if (prev >= 0x30A6 && prev <= 0x30FD) { text[pos - 1] += diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizer.java index 
228909d2b2a..7f0b93d3e69 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizer.java @@ -69,7 +69,7 @@ public class SoraniNormalizer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int normalize(char s[], int len) { + public int normalize(char[] s, int len) { for (int i = 0; i < len; i++) { switch (s[i]) { case YEH: diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemmer.java index b7a8df51439..9b5b50437e3 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemmer.java @@ -28,7 +28,7 @@ public class SoraniStemmer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int stem(char s[], int len) { + public int stem(char[] s, int len) { // postposition if (len > 5 && endsWith(s, len, "دا")) { len -= 2; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java index 55672d5cfb3..2cd2b933a44 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java @@ -234,7 +234,7 @@ class ClassicTokenizerImpl { private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -278,7 +278,7 @@ class ClassicTokenizerImpl { /** * this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; @@ -396,7 +396,7 @@ class ClassicTokenizerImpl { /* is the buffer big enough? 
*/ if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) { /* if not: blow it up */ - char newBuffer[] = new char[zzBuffer.length * 2]; + char[] newBuffer = new char[zzBuffer.length * 2]; System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length); zzBuffer = newBuffer; zzEndRead += zzFinalHighSurrogate; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java index f4c2f86c283..d1a81c17631 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java @@ -155,7 +155,7 @@ public final class CommonGramsFilter extends TokenFilter { clearAttributes(); int length = buffer.length(); - char termText[] = termAttribute.buffer(); + char[] termText = termAttribute.buffer(); if (length > termText.length) { termText = termAttribute.resizeBuffer(length); } diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java index 886f3ffaaad..e399c0c9fb6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java @@ -317,7 +317,7 @@ public class PatternParser extends DefaultHandler { /** @see org.xml.sax.ContentHandler#characters(char[], int, int) */ @SuppressWarnings({"unchecked", "rawtypes"}) @Override - public void characters(char ch[], int start, int length) { + public void characters(char[] ch, int start, int length) { StringBuilder chars = new StringBuilder(length); chars.append(ch, start, length); String word = readToken(chars); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java index 15e9ed87775..bcc7f091ff3 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java @@ -122,7 +122,7 @@ public class TernaryTree implements Cloneable { if (freenode + len > eq.length) { redimNodeArrays(eq.length + BLOCK_SIZE); } - char strkey[] = new char[len--]; + char[] strkey = new char[len--]; key.getChars(0, len, strkey, 0); strkey[len] = 0; root = insert(root, strkey, 0, val); @@ -255,7 +255,7 @@ public class TernaryTree implements Cloneable { public int find(String key) { int len = key.length(); - char strkey[] = new char[len + 1]; + char[] strkey = new char[len + 1]; key.getChars(0, len, strkey, 0); strkey[len] = 0; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilter.java index 6c93c59ecbf..6b7456713ca 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilter.java @@ -37,7 +37,7 @@ public final class DecimalDigitFilter extends TokenFilter { @Override public boolean incrementToken() throws IOException { if (input.incrementToken()) { - char buffer[] = 
termAtt.buffer(); + char[] buffer = termAtt.buffer(); int length = termAtt.length(); for (int i = 0; i < length; i++) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemmer.java index 0f9dbba3652..de659baba40 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemmer.java @@ -34,7 +34,7 @@ public class CzechStemmer { * @return length of input buffer after normalization *
<p>
NOTE: Input is expected to be in lowercase, but with diacritical marks */ - public int stem(char s[], int len) { + public int stem(char[] s, int len) { len = removeCase(s, len); len = removePossessives(s, len); if (len > 0) { @@ -43,7 +43,7 @@ public class CzechStemmer { return len; } - private int removeCase(char s[], int len) { + private int removeCase(char[] s, int len) { if (len > 7 && endsWith(s, len, "atech")) return len - 5; if (len > 6 @@ -112,14 +112,14 @@ public class CzechStemmer { return len; } - private int removePossessives(char s[], int len) { + private int removePossessives(char[] s, int len) { if (len > 5 && (endsWith(s, len, "ov") || endsWith(s, len, "in") || endsWith(s, len, "ův"))) return len - 2; return len; } - private int normalize(char s[], int len) { + private int normalize(char[] s, int len) { if (endsWith(s, len, "čt")) { // čt -> ck s[len - 2] = 'c'; s[len - 1] = 'k'; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java index 4f96a53aea5..c5d2228b6ab 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java @@ -59,7 +59,7 @@ package org.apache.lucene.analysis.de; */ public class GermanLightStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { for (int i = 0; i < len; i++) switch (s[i]) { case 'ä': @@ -110,7 +110,7 @@ public class GermanLightStemmer { } } - private int step1(char s[], int len) { + private int step1(char[] s, int len) { if (len > 5 && s[len - 3] == 'e' && s[len - 2] == 'r' && s[len - 1] == 'n') return len - 3; if (len > 4 && s[len - 2] == 'e') @@ -129,7 +129,7 @@ public class GermanLightStemmer { return len; } - private int step2(char s[], int len) { + private int step2(char[] s, int len) { if (len > 5 && s[len - 3] == 'e' && s[len - 2] == 's' && s[len - 1] == 't') return len - 3; if (len > 4 && s[len - 2] == 'e' && (s[len - 1] == 'r' || s[len - 1] == 'n')) return len - 2; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java index b5b8e0b5b86..b806ad77a77 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java @@ -59,7 +59,7 @@ package org.apache.lucene.analysis.de; */ public class GermanMinimalStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 5) return len; for (int i = 0; i < len; i++) diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java index 551b6994b42..ed5cff30443 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java @@ -53,7 +53,7 @@ public final class GermanNormalizationFilter extends TokenFilter { public boolean incrementToken() throws IOException { if (input.incrementToken()) { int state = N; - char buffer[] = termAtt.buffer(); + char[] buffer = termAtt.buffer(); int length = termAtt.length(); for (int i = 0; i < length; i++) { 
final char c = buffer[i]; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemmer.java index 9b9f399a061..0fb56788586 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemmer.java @@ -39,7 +39,7 @@ public class GreekStemmer { * @param len The length of the char[] array. * @return The new length of the stemmed word. */ - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 4) // too short return len; @@ -72,7 +72,7 @@ public class GreekStemmer { return rule22(s, len); } - private int rule0(char s[], int len) { + private int rule0(char[] s, int len) { if (len > 9 && (endsWith(s, len, "καθεστωτοσ") || endsWith(s, len, "καθεστωτων"))) return len - 4; @@ -131,7 +131,7 @@ public class GreekStemmer { return len; } - private int rule1(char s[], int len) { + private int rule1(char[] s, int len) { if (len > 4 && (endsWith(s, len, "αδεσ") || endsWith(s, len, "αδων"))) { len -= 4; if (!(endsWith(s, len, "οκ") @@ -148,7 +148,7 @@ public class GreekStemmer { return len; } - private int rule2(char s[], int len) { + private int rule2(char[] s, int len) { if (len > 4 && (endsWith(s, len, "εδεσ") || endsWith(s, len, "εδων"))) { len -= 4; if (endsWith(s, len, "οπ") @@ -163,7 +163,7 @@ public class GreekStemmer { return len; } - private int rule3(char s[], int len) { + private int rule3(char[] s, int len) { if (len > 5 && (endsWith(s, len, "ουδεσ") || endsWith(s, len, "ουδων"))) { len -= 5; if (endsWith(s, len, "αρκ") @@ -188,7 +188,7 @@ public class GreekStemmer { private static final CharArraySet exc4 = new CharArraySet(Arrays.asList("θ", "δ", "ελ", "γαλ", "ν", "π", "ιδ", "παρ"), false); - private int rule4(char s[], int len) { + private int rule4(char[] s, int len) { if (len > 3 && (endsWith(s, len, "εωσ") || endsWith(s, len, "εων"))) { len -= 3; if (exc4.contains(s, 0, len)) len++; // add back -ε @@ -196,7 +196,7 @@ public class GreekStemmer { return len; } - private int rule5(char s[], int len) { + private int rule5(char[] s, int len) { if (len > 2 && endsWith(s, len, "ια")) { len -= 2; if (endsWithVowel(s, len)) len++; // add back -ι @@ -216,7 +216,7 @@ public class GreekStemmer { "συναδ", "τσαμ", "υποδ", "φιλον", "φυλοδ", "χασ"), false); - private int rule6(char s[], int len) { + private int rule6(char[] s, int len) { boolean removed = false; if (len > 3 && (endsWith(s, len, "ικα") || endsWith(s, len, "ικο"))) { len -= 3; @@ -239,7 +239,7 @@ public class GreekStemmer { "χ"), false); - private int rule7(char s[], int len) { + private int rule7(char[] s, int len) { if (len == 5 && endsWith(s, len, "αγαμε")) return len - 1; if (len > 7 && endsWith(s, len, "ηθηκαμε")) len -= 7; @@ -359,7 +359,7 @@ public class GreekStemmer { "ψηλοταβ"), false); - private int rule8(char s[], int len) { + private int rule8(char[] s, int len) { boolean removed = false; if (len > 8 && endsWith(s, len, "ιουντανε")) { @@ -410,7 +410,7 @@ public class GreekStemmer { "θαρρ", "θ"), false); - private int rule9(char s[], int len) { + private int rule9(char[] s, int len) { if (len > 5 && endsWith(s, len, "ησετε")) len -= 5; if (len > 3 && endsWith(s, len, "ετε")) { @@ -455,7 +455,7 @@ public class GreekStemmer { return len; } - private int rule10(char s[], int len) { + private int rule10(char[] s, int len) { if (len > 5 && (endsWith(s, len, "οντασ") || endsWith(s, len, 
"ωντασ"))) { len -= 5; if (len == 3 && endsWith(s, len, "αρχ")) { @@ -471,7 +471,7 @@ public class GreekStemmer { return len; } - private int rule11(char s[], int len) { + private int rule11(char[] s, int len) { if (len > 6 && endsWith(s, len, "ομαστε")) { len -= 6; if (len == 2 && endsWith(s, len, "ον")) { @@ -498,7 +498,7 @@ public class GreekStemmer { new CharArraySet( Arrays.asList("αλ", "αρ", "εκτελ", "ζ", "μ", "ξ", "παρακαλ", "αρ", "προ", "νισ"), false); - private int rule12(char s[], int len) { + private int rule12(char[] s, int len) { if (len > 5 && endsWith(s, len, "ιεστε")) { len -= 5; if (exc12a.contains(s, 0, len)) len += 4; // add back -ιεστ @@ -515,7 +515,7 @@ public class GreekStemmer { private static final CharArraySet exc13 = new CharArraySet(Arrays.asList("διαθ", "θ", "παρακαταθ", "προσθ", "συνθ"), false); - private int rule13(char s[], int len) { + private int rule13(char[] s, int len) { if (len > 6 && endsWith(s, len, "ηθηκεσ")) { len -= 6; } else if (len > 5 && (endsWith(s, len, "ηθηκα") || endsWith(s, len, "ηθηκε"))) { @@ -576,7 +576,7 @@ public class GreekStemmer { "τσα"), false); - private int rule14(char s[], int len) { + private int rule14(char[] s, int len) { boolean removed = false; if (len > 5 && endsWith(s, len, "ουσεσ")) { @@ -660,7 +660,7 @@ public class GreekStemmer { private static final CharArraySet exc15b = new CharArraySet(Arrays.asList("ψοφ", "ναυλοχ"), false); - private int rule15(char s[], int len) { + private int rule15(char[] s, int len) { boolean removed = false; if (len > 4 && endsWith(s, len, "αγεσ")) { len -= 4; @@ -696,7 +696,7 @@ public class GreekStemmer { new CharArraySet( Arrays.asList("ν", "χερσον", "δωδεκαν", "ερημον", "μεγαλον", "επταν"), false); - private int rule16(char s[], int len) { + private int rule16(char[] s, int len) { boolean removed = false; if (len > 4 && endsWith(s, len, "ησου")) { len -= 4; @@ -717,7 +717,7 @@ public class GreekStemmer { "ασβ", "σβ", "αχρ", "χρ", "απλ", "αειμν", "δυσχρ", "ευχρ", "κοινοχρ", "παλιμψ"), false); - private int rule17(char s[], int len) { + private int rule17(char[] s, int len) { if (len > 4 && endsWith(s, len, "ηστε")) { len -= 4; if (exc17.contains(s, 0, len)) len += 3; // add back the -ηστ @@ -729,7 +729,7 @@ public class GreekStemmer { private static final CharArraySet exc18 = new CharArraySet(Arrays.asList("ν", "ρ", "σπι", "στραβομουτσ", "κακομουτσ", "εξων"), false); - private int rule18(char s[], int len) { + private int rule18(char[] s, int len) { boolean removed = false; if (len > 6 && (endsWith(s, len, "ησουνε") || endsWith(s, len, "ηθουνε"))) { @@ -753,7 +753,7 @@ public class GreekStemmer { new CharArraySet( Arrays.asList("παρασουσ", "φ", "χ", "ωριοπλ", "αζ", "αλλοσουσ", "ασουσ"), false); - private int rule19(char s[], int len) { + private int rule19(char[] s, int len) { boolean removed = false; if (len > 6 && (endsWith(s, len, "ησουμε") || endsWith(s, len, "ηθουμε"))) { @@ -773,13 +773,13 @@ public class GreekStemmer { return len; } - private int rule20(char s[], int len) { + private int rule20(char[] s, int len) { if (len > 5 && (endsWith(s, len, "ματων") || endsWith(s, len, "ματοσ"))) len -= 3; else if (len > 4 && endsWith(s, len, "ματα")) len -= 2; return len; } - private int rule21(char s[], int len) { + private int rule21(char[] s, int len) { if (len > 9 && endsWith(s, len, "ιοντουσαν")) return len - 9; if (len > 8 @@ -877,7 +877,7 @@ public class GreekStemmer { return len; } - private int rule22(char s[], int len) { + private int rule22(char[] s, int len) { if (endsWith(s, 
len, "εστερ") || endsWith(s, len, "εστατ")) return len - 5; if (endsWith(s, len, "οτερ") @@ -899,7 +899,7 @@ public class GreekStemmer { * @param suffix A {@link String} object to check if the word given ends with these characters. * @return True if the word ends with the suffix given , false otherwise. */ - private boolean endsWith(char s[], int len, String suffix) { + private boolean endsWith(char[] s, int len, String suffix) { final int suffixLen = suffix.length(); if (suffixLen > len) return false; for (int i = suffixLen - 1; i >= 0; i--) @@ -916,7 +916,7 @@ public class GreekStemmer { * @return True if the word contained in the leading portion of char[] array , ends with a vowel , * false otherwise. */ - private boolean endsWithVowel(char s[], int len) { + private boolean endsWithVowel(char[] s, int len) { if (len == 0) return false; switch (s[len - 1]) { case 'α': @@ -940,7 +940,7 @@ public class GreekStemmer { * @return True if the word contained in the leading portion of char[] array , ends with a vowel , * false otherwise. */ - private boolean endsWithVowelNoY(char s[], int len) { + private boolean endsWithVowelNoY(char[] s, int len) { if (len == 0) return false; switch (s[len - 1]) { case 'α': diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java index 1837c427f8b..f248f5e87ae 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java @@ -38993,7 +38993,7 @@ public final class UAX29URLEmailTokenizerImpl { private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -39168,7 +39168,7 @@ public final class UAX29URLEmailTokenizerImpl { /** this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemmer.java index 693eb638b22..f9e77fd5c39 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemmer.java @@ -23,7 +23,7 @@ package org.apache.lucene.analysis.en; */ public class EnglishMinimalStemmer { @SuppressWarnings("fallthrough") - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 3 || s[len - 1] != 's') return len; switch (s[len - 2]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java index 7a6d9585d14..15ffb0bb1b4 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java @@ -59,7 +59,7 @@ package 
org.apache.lucene.analysis.es; */ public class SpanishLightStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 5) return len; for (int i = 0; i < len; i++) diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemmer.java index c60b501e204..44cbfcb578f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemmer.java @@ -24,7 +24,7 @@ package org.apache.lucene.analysis.es; */ public class SpanishMinimalStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 4 || s[len - 1] != 's') return len; for (int i = 0; i < len; i++) diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizer.java index aac26e9763a..21f9e156e8c 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizer.java @@ -57,7 +57,7 @@ public class PersianNormalizer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int normalize(char s[], int len) { + public int normalize(char[] s, int len) { for (int i = 0; i < len; i++) { switch (s[i]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java index ce81fbd82bd..776b70f6c90 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java @@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*; */ public class FinnishLightStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 4) return len; for (int i = 0; i < len; i++) @@ -83,7 +83,7 @@ public class FinnishLightStemmer { return len; } - private int step1(char s[], int len) { + private int step1(char[] s, int len) { if (len > 8) { if (endsWith(s, len, "kin")) return step1(s, len - 3); if (endsWith(s, len, "ko")) return step1(s, len - 2); @@ -96,7 +96,7 @@ public class FinnishLightStemmer { return len; } - private int step2(char s[], int len) { + private int step2(char[] s, int len) { if (len > 5) { if (endsWith(s, len, "lla") || endsWith(s, len, "tse") || endsWith(s, len, "sti")) return len - 3; @@ -109,7 +109,7 @@ public class FinnishLightStemmer { return len; } - private int step3(char s[], int len) { + private int step3(char[] s, int len) { if (len > 8) { if (endsWith(s, len, "nnen")) { s[len - 4] = 's'; @@ -173,7 +173,7 @@ public class FinnishLightStemmer { return len; } - private int norm1(char s[], int len) { + private int norm1(char[] s, int len) { if (len > 5 && endsWith(s, len, "hde")) { s[len - 3] = 'k'; s[len - 2] = 's'; @@ -198,7 +198,7 @@ public class FinnishLightStemmer { return len; } - private int norm2(char s[], int len) { + private int norm2(char[] s, int len) { if (len > 8) { if (s[len - 1] == 'e' || s[len - 1] == 'o' || s[len - 1] == 'u') len--; } diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java 
b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java index e2620c24142..be54195b85a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java @@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*; */ public class FrenchLightStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len > 5 && s[len - 1] == 'x') { if (s[len - 3] == 'a' && s[len - 2] == 'u' && s[len - 4] != 'e') s[len - 2] = 'l'; len--; @@ -209,7 +209,7 @@ public class FrenchLightStemmer { return norm(s, len); } - private int norm(char s[], int len) { + private int norm(char[] s, int len) { if (len > 4) { for (int i = 0; i < len; i++) switch (s[i]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java index 7135f1fe98d..62ceaea073e 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java @@ -58,7 +58,7 @@ package org.apache.lucene.analysis.fr; * general French corpora. Jacques Savoy. */ public class FrenchMinimalStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { if (len < 6) return len; if (s[len - 1] == 'x') { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemmer.java index 90d5178cd1f..c6a0bfd0104 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemmer.java @@ -31,7 +31,7 @@ public class GalicianMinimalStemmer extends RSLPStemmerBase { private static final Step pluralStep = parse(GalicianMinimalStemmer.class, "galician.rslp").get("Plural"); - public int stem(char s[], int len) { + public int stem(char[] s, int len) { return pluralStep.apply(s, len); } } diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemmer.java index a663817fd8b..e3e93a092a9 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemmer.java @@ -44,7 +44,7 @@ public class GalicianStemmer extends RSLPStemmerBase { * @param len initial valid length of buffer * @return new valid length, stemmed */ - public int stem(char s[], int len) { + public int stem(char[] s, int len) { assert s.length >= len + 1 : "this stemmer requires an oversized array of at least 1"; len = plural.apply(s, len); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizer.java index 41447493070..8c1384a0d29 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizer.java @@ -43,7 +43,7 @@ public class HindiNormalizer { * @param len length of input buffer * @return length of input buffer after normalization */ - public int 
normalize(char s[], int len) { + public int normalize(char[] s, int len) { for (int i = 0; i < len; i++) { switch (s[i]) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemmer.java index 70682382142..cede10c1850 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemmer.java @@ -26,7 +26,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*; * http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf */ public class HindiStemmer { - public int stem(char buffer[], int len) { + public int stem(char[] buffer, int len) { // 5 if ((len > 6) && (endsWith(buffer, len, "ाएंगी") diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java index 87a0a5fd469..12b3c257a4c 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java @@ -60,7 +60,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*; * Portuguese, German and Hungarian Languages Jacques Savoy */ public class HungarianLightStemmer { - public int stem(char s[], int len) { + public int stem(char[] s, int len) { for (int i = 0; i < len; i++) switch (s[i]) { case 'á': @@ -94,7 +94,7 @@ public class HungarianLightStemmer { return normalize(s, len); } - private int removeCase(char s[], int len) { + private int removeCase(char[] s, int len) { if (len > 6 && endsWith(s, len, "kent")) return len - 4; if (len > 5) { @@ -147,7 +147,7 @@ public class HungarianLightStemmer { return len; } - private int removePossessive(char s[], int len) { + private int removePossessive(char[] s, int len) { if (len > 6) { if (!isVowel(s[len - 5]) && (endsWith(s, len, "atok") || endsWith(s, len, "otok") || endsWith(s, len, "etek"))) @@ -202,7 +202,7 @@ public class HungarianLightStemmer { } @SuppressWarnings("fallthrough") - private int removePlural(char s[], int len) { + private int removePlural(char[] s, int len) { if (len > 3 && s[len - 1] == 'k') switch (s[len - 2]) { case 'a': @@ -215,7 +215,7 @@ public class HungarianLightStemmer { return len; } - private int normalize(char s[], int len) { + private int normalize(char[] s, int len) { if (len > 3) switch (s[len - 1]) { case 'a': diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java index 63ae9826506..e4bab11874f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java @@ -94,7 +94,7 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res @Override public void inform(ResourceLoader loader) throws IOException { - String dicts[] = dictionaryFiles.split(","); + String[] dicts = dictionaryFiles.split(","); InputStream affix = null; List dictionaries = new ArrayList<>(); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ISO8859_14Decoder.java 
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ISO8859_14Decoder.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ISO8859_14Decoder.java
index 52a9c10bcd1..7e2630e81c8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ISO8859_14Decoder.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ISO8859_14Decoder.java
@@ -25,7 +25,7 @@ import java.nio.charset.StandardCharsets;
 // many hunspell dictionaries use this encoding, yet java does not have it?!?!
 final class ISO8859_14Decoder extends CharsetDecoder {

-  static final char TABLE[] =
+  static final char[] TABLE =
       new char[] {
         0x00A0, 0x1E02, 0x1E03, 0x00A3, 0x010A, 0x010B, 0x1E0A, 0x00A7,
         0x1E80, 0x00A9, 0x1E82, 0x1E0B, 0x1EF2, 0x00AD, 0x00AE, 0x0178,
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemmer.java
index eb9ec190dfe..31151a09969 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemmer.java
@@ -42,7 +42,7 @@ public class IndonesianStemmer {
    *
    * Use stemDerivational to control whether full stemming or only light
    * inflectional stemming is done.
    */
-  public int stem(char text[], int length, boolean stemDerivational) {
+  public int stem(char[] text, int length, boolean stemDerivational) {
     flags = 0;
     numSyllables = 0;
     for (int i = 0; i < length; i++) if (isVowel(text[i])) numSyllables++;
@@ -54,7 +54,7 @@ public class IndonesianStemmer {
     return length;
   }

-  private int stemDerivational(char text[], int length) {
+  private int stemDerivational(char[] text, int length) {
     int oldLength = length;
     if (numSyllables > 2) length = removeFirstOrderPrefix(text, length);
     if (oldLength != length) { // a rule is fired
@@ -82,7 +82,7 @@ public class IndonesianStemmer {
     }
   }

-  private int removeParticle(char text[], int length) {
+  private int removeParticle(char[] text, int length) {
     if (endsWith(text, length, "kah")
         || endsWith(text, length, "lah")
         || endsWith(text, length, "pun")) {
@@ -93,7 +93,7 @@ public class IndonesianStemmer {
     return length;
   }

-  private int removePossessivePronoun(char text[], int length) {
+  private int removePossessivePronoun(char[] text, int length) {
     if (endsWith(text, length, "ku") || endsWith(text, length, "mu")) {
       numSyllables--;
       return length - 2;
@@ -107,7 +107,7 @@ public class IndonesianStemmer {
     return length;
   }

-  private int removeFirstOrderPrefix(char text[], int length) {
+  private int removeFirstOrderPrefix(char[] text, int length) {
     if (startsWith(text, length, "meng")) {
       flags |= REMOVED_MENG;
       numSyllables--;
@@ -198,7 +198,7 @@ public class IndonesianStemmer {
     return length;
   }

-  private int removeSecondOrderPrefix(char text[], int length) {
+  private int removeSecondOrderPrefix(char[] text, int length) {
     if (startsWith(text, length, "ber")) {
       flags |= REMOVED_BER;
       numSyllables--;
@@ -240,7 +240,7 @@ public class IndonesianStemmer {
     return length;
   }

-  private int removeSuffix(char text[], int length) {
+  private int removeSuffix(char[] text, int length) {
     if (endsWith(text, length, "kan")
         && (flags & REMOVED_KE) == 0
         && (flags & REMOVED_PENG) == 0
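All the stemmers touched above share the same in-place contract: the caller hands over a buffer plus a valid length, the stemmer mutates the prefix and returns the new length. A usage sketch under that contract (the signature is taken from the hunk above; the sample word and its output are illustrative only):

```java
import org.apache.lucene.analysis.id.IndonesianStemmer;

public class StemInPlaceDemo {
  public static void main(String[] args) {
    IndonesianStemmer stemmer = new IndonesianStemmer();
    char[] buffer = "memberikan".toCharArray();
    // The stemmer rewrites the buffer in place and returns the new valid length;
    // only the prefix [0, newLen) is meaningful afterwards.
    int newLen = stemmer.stem(buffer, buffer.length, true);
    System.out.println(new String(buffer, 0, newLen));
  }
}
```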

diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
index 27f712db55c..3f20289d4b9 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
@@ -71,7 +71,7 @@ public class IndicNormalizer {
    *
    * the columns are: ch1, ch2, ch3, res, flags ch1, ch2, and ch3 are the decomposition res is
    * the composition, and flags are the scripts to which it applies.
    */
-  private static final int decompositions[][] = {
+  private static final int[][] decompositions = {
     /* devanagari, gujarati vowel candra O */
     {0x05, 0x3E, 0x45, 0x11, flag(DEVANAGARI) | flag(GUJARATI)},
     /* devanagari short O */
@@ -243,7 +243,7 @@ public class IndicNormalizer {
    * @param len valid length
    * @return normalized length
    */
-  public int normalize(char text[], int len) {
+  public int normalize(char[] text, int len) {
     for (int i = 0; i < len; i++) {
       final Character.UnicodeBlock block = Character.UnicodeBlock.of(text[i]);
       final ScriptData sd = scripts.get(block);
@@ -257,7 +257,7 @@ public class IndicNormalizer {

   /** Compose into standard form any compositions in the decompositions table. */
   private int compose(
-      int ch0, Character.UnicodeBlock block0, ScriptData sd, char text[], int pos, int len) {
+      int ch0, Character.UnicodeBlock block0, ScriptData sd, char[] text, int pos, int len) {
     if (pos + 1 >= len) /* need at least 2 chars! */ return len;
     final int ch1 = text[pos + 1] - sd.base;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java
index d134d14131a..eb684130958 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java
@@ -59,7 +59,7 @@ package org.apache.lucene.analysis.it;
  */
 public class ItalianLightStemmer {

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     if (len < 6) return len;

     for (int i = 0; i < len; i++)
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemmer.java
index 9798a212ed4..bee38460419 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemmer.java
@@ -34,7 +34,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
  */
 public class LatvianStemmer {
   /** Stem a Latvian word. Returns the new adjusted length. */
-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     int numVowels = numVowels(s, len);

     for (int i = 0; i < affixes.length; i++) {
@@ -48,7 +48,7 @@ public class LatvianStemmer {
     return len;
   }

-  static final Affix affixes[] = {
+  static final Affix[] affixes = {
     new Affix("ajiem", 3, false), new Affix("ajai", 3, false),
     new Affix("ajam", 2, false), new Affix("ajām", 2, false),
     new Affix("ajos", 2, false), new Affix("ajās", 2, false),
@@ -71,7 +71,7 @@ public class LatvianStemmer {
   };

   static class Affix {
-    char affix[]; // suffix
+    char[] affix; // suffix
     int vc; // vowel count of the suffix
     boolean palatalizes; // true if we should fire palatalization rules.
@@ -92,7 +92,7 @@ public class LatvianStemmer {
    *
    *   • z -> ž
    *
    */
-  private int unpalatalize(char s[], int len) {
+  private int unpalatalize(char[] s, int len) {
     // we check the character removed: if it's -u then
     // it's 2,5, or 6 gen pl., and these two can only apply then.
     if (s[len] == 'u') {
@@ -160,7 +160,7 @@ public class LatvianStemmer {
    * Count the vowels in the string, we always require at least one in the remaining stem to accept
    * it.
    */
-  private int numVowels(char s[], int len) {
+  private int numVowels(char[] s, int len) {
     int n = 0;
     for (int i = 0; i < len; i++) {
       switch (s[i]) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
index 78f86405656..cbe9bf173f8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
@@ -188,7 +188,7 @@ public final class ASCIIFoldingFilter extends TokenFilter {
    * @lucene.internal
    */
   public static final int foldToASCII(
-      char input[], int inputPos, char output[], int outputPos, int length) {
+      char[] input, int inputPos, char[] output, int outputPos, int length) {
     final int end = inputPos + length;
     for (int pos = inputPos; pos < end; ++pos) {
       final char c = input[pos];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.java
index 522e4c0bb13..af4dcc1e3f4 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.java
@@ -88,7 +88,7 @@ public class FingerprintFilter extends TokenFilter {
   private final boolean buildSingleOutputToken() throws IOException {
     inputEnded = false;

-    char clonedLastTerm[] = null;
+    char[] clonedLastTerm = null;
     uniqueTerms = new CharArraySet(8, false);
     int outputTokenSize = 0;
     while (input.incrementToken()) {
@@ -96,7 +96,7 @@ public class FingerprintFilter extends TokenFilter {
         continue;
       }

-      final char term[] = termAttribute.buffer();
+      final char[] term = termAttribute.buffer();
       final int length = termAttribute.length();

       if (!uniqueTerms.contains(term, 0, length)) {
@@ -150,8 +150,8 @@ public class FingerprintFilter extends TokenFilter {
         new Comparator<Object>() {
           @Override
           public int compare(Object o1, Object o2) {
-            char v1[] = (char[]) o1;
-            char v2[] = (char[]) o2;
+            char[] v1 = (char[]) o1;
+            char[] v2 = (char[]) o2;
             int len1 = v1.length;
             int len2 = v2.length;
             int lim = Math.min(len1, len2);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java
index caab47f5825..47fef0937e8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java
@@ -121,7 +121,7 @@ public final class HyphenatedWordsFilter extends TokenFilter {
     restoreState(savedState);
     savedState = null;

-    char term[] = termAttribute.buffer();
+    char[] term = termAttribute.buffer();
     int length = hyphenated.length();
     if (length > termAttribute.length()) {
       term = termAttribute.resizeBuffer(length);
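The HyphenatedWordsFilter hunk above shows the standard pattern for writing into a term attribute's backing array: take buffer(), grow it with resizeBuffer() when the new content is longer, and finish with setLength(). A minimal sketch of that pattern (the helper class and method are hypothetical, not from the patch):

```java
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

final class TermBufferPattern {
  /** Copies text into the attribute, growing its backing array only when needed. */
  static void setTerm(CharTermAttribute termAttribute, String text) {
    char[] term = termAttribute.buffer();
    if (text.length() > term.length) {
      // resizeBuffer may allocate and return a new, larger array
      term = termAttribute.resizeBuffer(text.length());
    }
    text.getChars(0, text.length(), term, 0);
    termAttribute.setLength(text.length());
  }
}
```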
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.java
index d08270cc3d1..4f83d028562 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.java
@@ -47,7 +47,7 @@ public final class RemoveDuplicatesTokenFilter extends TokenFilter {
   @Override
   public boolean incrementToken() throws IOException {
     while (input.incrementToken()) {
-      final char term[] = termAttribute.buffer();
+      final char[] term = termAttribute.buffer();
       final int length = termAttribute.length();
       final int posIncrement = posIncAttribute.getPositionIncrement();

@@ -58,7 +58,7 @@ public final class RemoveDuplicatesTokenFilter extends TokenFilter {
       boolean duplicate = (posIncrement == 0 && previous.contains(term, 0, length));

       // clone the term, and add to the set of seen terms.
-      char saved[] = new char[length];
+      char[] saved = new char[length];
       System.arraycopy(term, 0, saved, 0, length);
       previous.add(saved);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
index 742f09821a9..d19a97f06d5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
@@ -183,7 +183,7 @@ public final class WordDelimiterFilter extends TokenFilter {
   // used for accumulating position increment gaps
   private int accumPosInc = 0;

-  private char savedBuffer[] = new char[1024];
+  private char[] savedBuffer = new char[1024];
   private int savedStartOffset;
   private int savedEndOffset;
   private String savedType;
@@ -377,9 +377,9 @@ public final class WordDelimiterFilter extends TokenFilter {
     first = true;
   }

-  private AttributeSource.State buffered[] = new AttributeSource.State[8];
-  private int startOff[] = new int[8];
-  private int posInc[] = new int[8];
+  private AttributeSource.State[] buffered = new AttributeSource.State[8];
+  private int[] startOff = new int[8];
+  private int[] posInc = new int[8];
   private int bufferedLen = 0;
   private int bufferedPos = 0;
   private boolean first;
@@ -616,7 +616,7 @@ public final class WordDelimiterFilter extends TokenFilter {
      * @param offset Offset in the concatenation to add the text
      * @param length Length of the text to append
      */
-    void append(char text[], int offset, int length) {
+    void append(char[] text, int offset, int length) {
       buffer.append(text, offset, length);
       subwordCount++;
     }
@@ -627,7 +627,7 @@ public final class WordDelimiterFilter extends TokenFilter {
       if (termAttribute.length() < buffer.length()) {
         termAttribute.resizeBuffer(buffer.length());
       }
-      char termbuffer[] = termAttribute.buffer();
+      char[] termbuffer = termAttribute.buffer();

       buffer.getChars(0, buffer.length(), termbuffer, 0);
       termAttribute.setLength(buffer.length());
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
index f513cab9719..ca9fac447ab 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
@@ -173,7 +173,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
     }

     // ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
-    byte types[] =
+    byte[] types =
         new byte
             [Math.max(
                 typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
index 8a66216f08a..2971704297b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
@@ -608,7 +608,7 @@ public final class WordDelimiterGraphFilter extends TokenFilter {
      * @param offset Offset in the concatenation to add the text
      * @param length Length of the text to append
      */
-    void append(char text[], int offset, int length) {
+    void append(char[] text, int offset, int length) {
       buffer.append(text, offset, length);
       subwordCount++;
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
index 78d038d687c..09ec073bd9b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
@@ -160,7 +160,7 @@ public class WordDelimiterGraphFilterFactory extends TokenFilterFactory
     }

     // ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
-    byte types[] =
+    byte[] types =
         new byte
             [Math.max(
                 typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
index e9e4e6be0e4..8b928d1f5d6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
@@ -40,7 +40,7 @@ public final class WordDelimiterIterator {

   public static final byte[] DEFAULT_WORD_DELIM_TABLE;

-  char text[];
+  char[] text;
   int length;

   /** start position of text, excluding leading delimiters */
@@ -207,7 +207,7 @@ public final class WordDelimiterIterator {
    * @param text New text
    * @param length length of the text
    */
-  void setText(char text[], int length) {
+  void setText(char[] text, int length) {
     this.text = text;
     this.length = this.endBounds = length;
     current = startBounds = end = 0;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
index 37583e58ae2..398f3fc4daf 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
@@ -82,7 +82,7 @@ public class NorwegianLightStemmer {
     useNynorsk = (flags & NYNORSK) != 0;
   }

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     // Remove possessive -s (bilens -> bilen) and continue checking
     if (len > 4 && s[len - 1] == 's') len--;

diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
index 3f9311cf0b0..dbc6d5774cb 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
@@ -78,7 +78,7 @@ public class NorwegianMinimalStemmer {
     useNynorsk = (flags & NYNORSK) != 0;
   }

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     // Remove genitive -s
     if (len > 4 && s[len - 1] == 's') len--;

diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java
index 28390ff7e6c..216921f0860 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java
@@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
  */
 public class PortugueseLightStemmer {

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     if (len < 4) return len;

     len = removeSuffix(s, len);
@@ -119,7 +119,7 @@ public class PortugueseLightStemmer {
     return len;
   }

-  private int removeSuffix(char s[], int len) {
+  private int removeSuffix(char[] s, int len) {
     if (len > 4 && endsWith(s, len, "es"))
       switch (s[len - 3]) {
         case 'r':
@@ -169,7 +169,7 @@ public class PortugueseLightStemmer {
     return len;
   }

-  private int normFeminine(char s[], int len) {
+  private int normFeminine(char[] s, int len) {
     if (len > 7
         && (endsWith(s, len, "inha") || endsWith(s, len, "iaca") || endsWith(s, len, "eira"))) {
       s[len - 1] = 'o';
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemmer.java
index bac066d4fd2..a8a81c27737 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemmer.java
@@ -31,7 +31,7 @@ public class PortugueseMinimalStemmer extends RSLPStemmerBase {
   private static final Step pluralStep =
       parse(PortugueseMinimalStemmer.class, "portuguese.rslp").get("Plural");

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     return pluralStep.apply(s, len);
   }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemmer.java
index 7bfed9c09d8..5d6a82bc7b7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemmer.java
@@ -43,7 +43,7 @@ public class PortugueseStemmer extends RSLPStemmerBase {
    * @param len initial valid length of buffer
    * @return new valid length, stemmed
    */
-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     assert s.length >= len + 1 : "this stemmer requires an oversized array of at least 1";

     len = plural.apply(s, len);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
index 86af81fd82c..f9d550b012d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
@@ -94,8 +94,8 @@ public abstract class RSLPStemmerBase {

   /** A basic rule, with no exceptions. */
   protected static class Rule {
-    protected final char suffix[];
-    protected final char replacement[];
+    protected final char[] suffix;
+    protected final char[] replacement;
     protected final int min;

     /**
@@ -112,12 +112,12 @@ public abstract class RSLPStemmerBase {
     }

     /** @return true if the word matches this rule. */
-    public boolean matches(char s[], int len) {
+    public boolean matches(char[] s, int len) {
       return (len - suffix.length >= min && endsWith(s, len, suffix));
     }

     /** @return new valid length of the string after firing this rule. */
-    public int replace(char s[], int len) {
+    public int replace(char[] s, int len) {
       if (replacement.length > 0) {
         System.arraycopy(replacement, 0, s, len - suffix.length, replacement.length);
       }
@@ -140,7 +140,7 @@ public abstract class RSLPStemmerBase {
     }

     @Override
-    public boolean matches(char s[], int len) {
+    public boolean matches(char[] s, int len) {
       return super.matches(s, len) && !exceptions.contains(s, 0, len);
     }
   }
@@ -167,7 +167,7 @@ public abstract class RSLPStemmerBase {
     }

     @Override
-    public boolean matches(char s[], int len) {
+    public boolean matches(char[] s, int len) {
       if (!super.matches(s, len)) return false;
       for (int i = 0; i < exceptions.length; i++) if (endsWith(s, len, exceptions[i])) return false;
       return true;
@@ -179,7 +179,7 @@ public abstract class RSLPStemmerBase {
   /** A step containing a list of rules. */
   protected static class Step {
     protected final String name;
-    protected final Rule rules[];
+    protected final Rule[] rules;
     protected final int min;
     protected final char[][] suffixes;

@@ -191,7 +191,7 @@ public abstract class RSLPStemmerBase {
      * @param min minimum word size. if this is 0 it is automatically calculated.
      * @param suffixes optional list of conditional suffixes. may be null.
      */
-    public Step(String name, Rule rules[], int min, String suffixes[]) {
+    public Step(String name, Rule[] rules, int min, String[] suffixes) {
       this.name = name;
       this.rules = rules;
       if (min == 0) {
@@ -209,7 +209,7 @@ public abstract class RSLPStemmerBase {
     }

     /** @return new valid length of the string after applying the entire step. */
-    public int apply(char s[], int len) {
+    public int apply(char[] s, int len) {
       if (len < min) return len;

       if (suffixes != null) {
@@ -275,8 +275,8 @@ public abstract class RSLPStemmerBase {
     String name = matcher.group(1);
     int min = Integer.parseInt(matcher.group(2));
     int type = Integer.parseInt(matcher.group(3));
-    String suffixes[] = parseList(matcher.group(4));
-    Rule rules[] = parseRules(r, type);
+    String[] suffixes = parseList(matcher.group(4));
+    Rule[] rules = parseRules(r, type);
     return new Step(name, rules, min, suffixes);
   }

@@ -322,7 +322,7 @@ public abstract class RSLPStemmerBase {
   private static String[] parseList(String s) {
     if (s.length() == 0) return null;
-    String list[] = s.split(",");
+    String[] list = s.split(",");
     for (int i = 0; i < list.length; i++) list[i] = parseString(list[i].trim());
     return list;
   }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
index ecdaf9a72b7..e337830dc4d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
@@ -61,12 +61,12 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
  */
 public class RussianLightStemmer {

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     len = removeCase(s, len);
     return normalize(s, len);
   }

-  private int normalize(char s[], int len) {
+  private int normalize(char[] s, int len) {
     if (len > 3)
       switch (s[len - 1]) {
         case 'ь':
@@ -78,7 +78,7 @@ public class RussianLightStemmer {
     return len;
   }

-  private int removeCase(char s[], int len) {
+  private int removeCase(char[] s, int len) {
     if (len > 6 && (endsWith(s, len, "иями") || endsWith(s, len, "оями"))) return len - 4;

     if (len > 5
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
index a81e3f57e37..6ee8aae5d39 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
@@ -85,11 +85,11 @@ public final class SnowballFilter extends TokenFilter {
   public final boolean incrementToken() throws IOException {
     if (input.incrementToken()) {
       if (!keywordAttr.isKeyword()) {
-        char termBuffer[] = termAtt.buffer();
+        char[] termBuffer = termAtt.buffer();
         final int length = termAtt.length();
         stemmer.setCurrent(termBuffer, length);
         stemmer.stem();
-        final char finalTerm[] = stemmer.getCurrentBuffer();
+        final char[] finalTerm = stemmer.getCurrentBuffer();
         final int newLength = stemmer.getCurrentBufferLength();
         if (finalTerm != termBuffer) termAtt.copyBuffer(finalTerm, 0, newLength);
         else termAtt.setLength(newLength);
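The SnowballFilter hunk above also documents the stemmer's calling convention: set the buffer, stem, then re-read the possibly replaced buffer. Used directly, outside a TokenStream, it looks roughly like this (EnglishStemmer is one of the generated org.tartarus.snowball.ext classes; the word is illustrative):

```java
import org.tartarus.snowball.SnowballProgram;
import org.tartarus.snowball.ext.EnglishStemmer;

public class SnowballDirectDemo {
  public static void main(String[] args) {
    SnowballProgram stemmer = new EnglishStemmer();
    char[] term = "running".toCharArray();
    stemmer.setCurrent(term, term.length);
    stemmer.stem();
    // The stemmer may have swapped in a larger buffer; always re-read it.
    char[] result = stemmer.getCurrentBuffer();
    System.out.println(new String(result, 0, stemmer.getCurrentBufferLength()));
  }
}
```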
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilter.java
index 553a4ff6df7..5273f228151 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilter.java
@@ -40,7 +40,7 @@ public final class SerbianNormalizationFilter extends TokenFilter {
   @Override
   public boolean incrementToken() throws IOException {
     if (input.incrementToken()) {
-      char buffer[] = termAtt.buffer();
+      char[] buffer = termAtt.buffer();
       int length = termAtt.length();
       for (int i = 0; i < length; i++) {
         final char c = buffer[i];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationRegularFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationRegularFilter.java
index 02e23125b4e..3380113ff67 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationRegularFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationRegularFilter.java
@@ -37,7 +37,7 @@ public final class SerbianNormalizationRegularFilter extends TokenFilter {
   @Override
   public boolean incrementToken() throws IOException {
     if (input.incrementToken()) {
-      char buffer[] = termAtt.buffer();
+      char[] buffer = termAtt.buffer();
       int length = termAtt.length();
       for (int i = 0; i < length; i++) {
         final char c = buffer[i];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java
index 1ebce0fc6ce..1f666010c67 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java
@@ -61,7 +61,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.*;
  */
 public class SwedishLightStemmer {

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     if (len > 4 && s[len - 1] == 's') len--;

     if (len > 7 && (endsWith(s, len, "elser") || endsWith(s, len, "heten"))) return len - 5;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemmer.java
index c564a9d5a7b..456c04f26b0 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemmer.java
@@ -62,7 +62,7 @@ import static org.apache.lucene.analysis.util.StemmerUtil.endsWith;
  */
 public class SwedishMinimalStemmer {

-  public int stem(char s[], int len) {
+  public int stem(char[] s, int len) {
     if (len > 4 && s[len - 1] == 's') len--;

     if (len > 6
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
index d198e4ecf3a..32b5ff00d48 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
@@ -87,19 +87,19 @@ public class SolrSynonymParser extends SynonymMap.Parser {
       }

       // TODO: we could process this more efficiently.
-      String sides[] = split(line, "=>");
+      String[] sides = split(line, "=>");
       if (sides.length > 1) { // explicit mapping
         if (sides.length != 2) {
           throw new IllegalArgumentException(
               "more than one explicit mapping specified on the same line");
         }
-        String inputStrings[] = split(sides[0], ",");
+        String[] inputStrings = split(sides[0], ",");
         CharsRef[] inputs = new CharsRef[inputStrings.length];
         for (int i = 0; i < inputs.length; i++) {
           inputs[i] = analyze(unescape(inputStrings[i]).trim(), new CharsRefBuilder());
         }

-        String outputStrings[] = split(sides[1], ",");
+        String[] outputStrings = split(sides[1], ",");
         CharsRef[] outputs = new CharsRef[outputStrings.length];
         for (int i = 0; i < outputs.length; i++) {
           outputs[i] = analyze(unescape(outputStrings[i]).trim(), new CharsRefBuilder());
@@ -111,7 +111,7 @@ public class SolrSynonymParser extends SynonymMap.Parser {
         }
       }
     } else {
-      String inputStrings[] = split(line, ",");
+      String[] inputStrings = split(line, ",");
       CharsRef[] inputs = new CharsRef[inputStrings.length];
       for (int i = 0; i < inputs.length; i++) {
         inputs[i] = analyze(unescape(inputStrings[i]).trim(), new CharsRefBuilder());
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
index 864bc948265..3a31cf8b996 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
@@ -235,7 +235,7 @@ public class SynonymMap {
       final byte[] spare = new byte[5];

       Set<CharsRef> keys = workingSet.keySet();
-      CharsRef sortedKeys[] = keys.toArray(new CharsRef[keys.size()]);
+      CharsRef[] sortedKeys = keys.toArray(new CharsRef[keys.size()]);
       Arrays.sort(sortedKeys, CharsRef.getUTF16SortedAsUTF8Comparator());

       final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
index 595b10171bd..f0e24ae7959 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
@@ -47,7 +47,7 @@ public class WordnetSynonymParser extends SynonymMap.Parser {
     try {
       String line = null;
       String lastSynSetID = "";
-      CharsRef synset[] = new CharsRef[8];
+      CharsRef[] synset = new CharsRef[8];
       int synsetSize = 0;

       while ((line = br.readLine()) != null) {
@@ -88,7 +88,7 @@ public class WordnetSynonymParser extends SynonymMap.Parser {
     return analyze(text, reuse);
   }

-  private void addInternal(CharsRef synset[], int size) {
+  private void addInternal(CharsRef[] synset, int size) {
     if (size <= 1) {
       return; // nothing to do
     }
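For context on the parser being touched: SolrSynonymParser accepts both line forms seen in the hunk, comma-separated equivalences and "=>" explicit mappings. A small, self-contained usage sketch (the rule strings are made up):

```java
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymMap;

public class SynonymRulesDemo {
  public static void main(String[] args) throws Exception {
    Analyzer analyzer = new WhitespaceAnalyzer();
    // One equivalence line and one explicit-mapping line:
    String rules = "ipod, i-pod\nfoo => bar, baz\n";
    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
    parser.parse(new StringReader(rules));
    SynonymMap map = parser.build();
    System.out.println("built synonym map, words: " + map.words.size());
  }
}
```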
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
index 3bc43f6aa84..f1c73df4e68 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
@@ -90,7 +90,7 @@ public final class TurkishLowerCaseFilter extends TokenFilter {
   }

   /** lookahead for a combining dot above. other NSMs may be in between. */
-  private boolean isBeforeDot(char s[], int pos, int len) {
+  private boolean isBeforeDot(char[] s, int pos, int len) {
     for (int i = pos; i < len; ) {
       final int ch = Character.codePointAt(s, i, len);
       if (Character.getType(ch) != Character.NON_SPACING_MARK) return false;
@@ -104,7 +104,7 @@ public final class TurkishLowerCaseFilter extends TokenFilter {
   /**
    * delete a character in-place. rarely happens, only if COMBINING_DOT_ABOVE is found after an i
    */
-  private int delete(char s[], int pos, int len) {
+  private int delete(char[] s, int pos, int len) {
     if (pos < len) System.arraycopy(s, pos + 1, s, pos, len - pos - 1);

     return len - 1;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayIterator.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayIterator.java
index e0717ff702d..5d1ece026f7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayIterator.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayIterator.java
@@ -26,7 +26,7 @@ import java.util.Locale;
  * @lucene.internal
  */
 public abstract class CharArrayIterator implements CharacterIterator {
-  private char array[];
+  private char[] array;
   private int start;
   private int index;
   private int length;
@@ -51,7 +51,7 @@ public abstract class CharArrayIterator implements CharacterIterator {
    * @param start offset into buffer
    * @param length maximum length to examine
    */
-  public void setText(final char array[], int start, int length) {
+  public void setText(final char[] array, int start, int length) {
     this.array = array;
     this.start = start;
     this.index = start;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/OpenStringBuilder.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/OpenStringBuilder.java
index 61fe741e906..feca305f5b1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/OpenStringBuilder.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/OpenStringBuilder.java
@@ -101,13 +101,13 @@ public class OpenStringBuilder implements Appendable, CharSequence {
     unsafeWrite((char) b);
   }

-  public void unsafeWrite(char b[], int off, int len) {
+  public void unsafeWrite(char[] b, int off, int len) {
     System.arraycopy(b, off, buf, this.len, len);
     this.len += len;
   }

   protected void resize(int len) {
-    char newbuf[] = new char[Math.max(buf.length << 1, len)];
+    char[] newbuf = new char[Math.max(buf.length << 1, len)];
     System.arraycopy(buf, 0, newbuf, 0, size());
     buf = newbuf;
   }
@@ -131,7 +131,7 @@ public class OpenStringBuilder implements Appendable, CharSequence {
     write(b, 0, b.length);
   }

-  public void write(char b[], int off, int len) {
+  public void write(char[] b, int off, int len) {
     reserve(len);
     unsafeWrite(b, off, len);
   }
@@ -153,7 +153,7 @@ public class OpenStringBuilder implements Appendable, CharSequence {
   }

   public char[] toCharArray() {
-    char newbuf[] = new char[size()];
+    char[] newbuf = new char[size()];
     System.arraycopy(buf, 0, newbuf, 0, size());
     return newbuf;
   }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/SegmentingTokenizerBase.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/SegmentingTokenizerBase.java
index f193e21a552..f82ff7575a4 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/SegmentingTokenizerBase.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/SegmentingTokenizerBase.java
@@ -37,7 +37,7 @@ import org.apache.lucene.util.AttributeFactory;
  */
 public abstract class SegmentingTokenizerBase extends Tokenizer {
   protected static final int BUFFERMAX = 1024;
-  protected final char buffer[] = new char[BUFFERMAX];
+  protected final char[] buffer = new char[BUFFERMAX];
   /** true length of text in the buffer */
   private int length = 0;
   /** length in buffer that can be evaluated safely, up to a safe end point */
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/StemmerUtil.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/StemmerUtil.java
index 21e653547f3..42d7806b8d5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/StemmerUtil.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/StemmerUtil.java
@@ -33,7 +33,7 @@ public class StemmerUtil {
    * @param prefix Prefix string to test
    * @return true if s starts with prefix
    */
-  public static boolean startsWith(char s[], int len, String prefix) {
+  public static boolean startsWith(char[] s, int len, String prefix) {
     final int prefixLen = prefix.length();
     if (prefixLen > len) return false;
     for (int i = 0; i < prefixLen; i++) if (s[i] != prefix.charAt(i)) return false;
@@ -48,7 +48,7 @@ public class StemmerUtil {
    * @param suffix Suffix string to test
    * @return true if s ends with suffix
    */
-  public static boolean endsWith(char s[], int len, String suffix) {
+  public static boolean endsWith(char[] s, int len, String suffix) {
     final int suffixLen = suffix.length();
     if (suffixLen > len) return false;
     for (int i = suffixLen - 1; i >= 0; i--)
@@ -65,7 +65,7 @@ public class StemmerUtil {
    * @param suffix Suffix string to test
    * @return true if s ends with suffix
    */
-  public static boolean endsWith(char s[], int len, char suffix[]) {
+  public static boolean endsWith(char[] s, int len, char[] suffix) {
     final int suffixLen = suffix.length;
     if (suffixLen > len) return false;
     for (int i = suffixLen - 1; i >= 0; i--)
@@ -82,7 +82,7 @@ public class StemmerUtil {
    * @param len length of input buffer
    * @return length of input buffer after deletion
    */
-  public static int delete(char s[], int pos, int len) {
+  public static int delete(char[] s, int pos, int len) {
     assert pos < len;
     if (pos < len - 1) { // don't arraycopy if asked to delete last character
       System.arraycopy(s, pos + 1, s, pos, len - pos - 1);
@@ -99,7 +99,7 @@ public class StemmerUtil {
    * @param nChars number of characters to delete
    * @return length of input buffer after deletion
    */
-  public static int deleteN(char s[], int pos, int len, int nChars) {
+  public static int deleteN(char[] s, int pos, int len, int nChars) {
     assert pos + nChars <= len;
     if (pos + nChars < len) { // don't arraycopy if asked to delete the last characters
       System.arraycopy(s, pos + nChars, s, pos, len - pos - nChars);
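Since StemmerUtil's helpers appear throughout the stemmers in this patch, a brief demonstration of how they operate on a buffer-plus-length pair (the sample words are arbitrary; the signatures are the ones shown in the hunk above):

```java
import static org.apache.lucene.analysis.util.StemmerUtil.deleteN;
import static org.apache.lucene.analysis.util.StemmerUtil.endsWith;

public class StemmerUtilDemo {
  public static void main(String[] args) {
    char[] s = "foxes".toCharArray();
    int len = s.length;
    if (endsWith(s, len, "es")) {
      len -= 2; // suffix stripping is just shrinking the valid length
    }
    char[] t = "aaXbb".toCharArray();
    int tlen = deleteN(t, 2, t.length, 1); // shifts the tail left, removing 'X'
    System.out.println(new String(s, 0, len) + " " + new String(t, 0, tlen)); // fox aabb
  }
}
```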
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
index 3e97b54ac63..d6639e9b353 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
@@ -326,7 +326,7 @@ class WikipediaTokenizerImpl {
   private static final int ZZ_PUSHBACK_2BIG = 2;

   /* error messages for the codes above */
-  private static final String ZZ_ERROR_MSG[] = {
+  private static final String[] ZZ_ERROR_MSG = {
     "Unknown internal scanner error",
     "Error: could not match input",
     "Error: pushback value was too large"
@@ -377,7 +377,7 @@ class WikipediaTokenizerImpl {
   /**
    * this buffer contains the current text to be matched and is the source of the yytext() string
    */
-  private char zzBuffer[] = new char[ZZ_BUFFERSIZE];
+  private char[] zzBuffer = new char[ZZ_BUFFERSIZE];

   /** the textposition at the last accepting state */
   private int zzMarkedPos;
@@ -537,7 +537,7 @@ class WikipediaTokenizerImpl {
       /* is the buffer big enough? */
       if (zzCurrentPos >= zzBuffer.length - zzFinalHighSurrogate) {
         /* if not: blow it up */
-        char newBuffer[] = new char[zzBuffer.length * 2];
+        char[] newBuffer = new char[zzBuffer.length * 2];
         System.arraycopy(zzBuffer, 0, newBuffer, 0, zzBuffer.length);
         zzBuffer = newBuffer;
         zzEndRead += zzFinalHighSurrogate;
diff --git a/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java b/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
index 930c68bc392..78dc51a1b8a 100644
--- a/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
+++ b/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
@@ -65,7 +65,7 @@ public class SnowballProgram implements Serializable {
    * @param text character array containing input
    * @param length valid length of text.
    */
-  public void setCurrent(char text[], int length) {
+  public void setCurrent(char[] text, int length) {
     current = text;
     cursor = 0;
     limit = length;
@@ -101,7 +101,7 @@ public class SnowballProgram implements Serializable {
   }

   // current string
-  private char current[];
+  private char[] current;

   protected int cursor;
   protected int limit;
@@ -346,7 +346,7 @@ public class SnowballProgram implements Serializable {
     final int newLength = limit + adjustment;
     // resize if necessary
     if (newLength > current.length) {
-      char newBuffer[] = new char[oversize(newLength)];
+      char[] newBuffer = new char[oversize(newLength)];
       System.arraycopy(current, 0, newBuffer, 0, limit);
       current = newBuffer;
     }
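Both buffer-expansion hunks above use the same grow-and-copy move. SnowballProgram calls a local oversize helper; Lucene's general-purpose equivalent is ArrayUtil.oversize, used here in a hedged sketch (the grow method name is mine, not from the patch):

```java
import org.apache.lucene.util.ArrayUtil;

final class GrowBufferDemo {
  /** Returns a buffer of capacity at least newLength, preserving the first limit chars. */
  static char[] grow(char[] current, int limit, int newLength) {
    if (newLength <= current.length) {
      return current; // still fits, no copy needed
    }
    // oversize() over-allocates so repeated growth is amortized O(n).
    char[] newBuffer = new char[ArrayUtil.oversize(newLength, Character.BYTES)];
    System.arraycopy(current, 0, newBuffer, 0, limit);
    return newBuffer;
  }
}
```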
Arrays.asList("rrdpafa", "pupmmlu", "xlq", "dyy", "zqrxrrck", "o", "hsrlfvcha")), false); - final byte table[] = + final byte[] table = new byte[] { -57, 26, 1, 48, 63, -23, 55, -84, 18, 120, -97, 103, 58, 13, 84, 89, 57, -13, -63, 5, 28, 97, -54, -94, 102, -108, -5, 5, 46, 40, 43, 78, 43, -72, 36, 29, 124, -106, -22, -51, 65, diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java index f015b70dee7..99061d50e20 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java @@ -379,7 +379,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { put( byte[].class, random -> { - byte bytes[] = new byte[random.nextInt(256)]; + byte[] bytes = new byte[random.nextInt(256)]; random.nextBytes(bytes); return bytes; }); @@ -802,7 +802,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { final Constructor ctor = tokenizers.get(random.nextInt(tokenizers.size())); final StringBuilder descr = new StringBuilder(); - final Object args[] = newTokenizerArgs(random, ctor.getParameterTypes()); + final Object[] args = newTokenizerArgs(random, ctor.getParameterTypes()); if (broken(ctor, args)) { continue; } @@ -823,7 +823,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { while (true) { final Constructor ctor = charfilters.get(random.nextInt(charfilters.size())); - final Object args[] = newCharFilterArgs(random, spec.reader, ctor.getParameterTypes()); + final Object[] args = newCharFilterArgs(random, spec.reader, ctor.getParameterTypes()); if (broken(ctor, args)) { continue; } @@ -860,7 +860,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { new ConditionalTokenFilter( spec.stream, in -> { - final Object args[] = newFilterArgs(random, in, ctor.getParameterTypes()); + final Object[] args = newFilterArgs(random, in, ctor.getParameterTypes()); if (broken(ctor, args)) { return in; } @@ -885,7 +885,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { }; break; } else { - final Object args[] = newFilterArgs(random, spec.stream, ctor.getParameterTypes()); + final Object[] args = newFilterArgs(random, spec.stream, ctor.getParameterTypes()); if (broken(ctor, args)) { continue; } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java index cecaf8ad494..a012e44ed5e 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java @@ -86,7 +86,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase { CharArraySet stopWordsSet = new CharArraySet(asSet("good", "test", "analyzer"), false); StopAnalyzer newStop = new StopAnalyzer(stopWordsSet); String s = "This is a good test of the english stop analyzer with positions"; - int expectedIncr[] = {1, 1, 1, 3, 1, 1, 1, 2, 1}; + int[] expectedIncr = {1, 1, 1, 3, 1, 1, 1, 2, 1}; try (TokenStream stream = newStop.tokenStream("test", s)) { assertNotNull(stream); int i = 0; diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilter.java index 
18af9438487..32ce49624e2 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilter.java @@ -51,7 +51,7 @@ public class TestTypeTokenFilter extends BaseTokenStreamTestCase { } } log(sb.toString()); - String stopTypes[] = new String[] {""}; + String[] stopTypes = new String[] {""}; Set stopSet = asSet(stopTypes); // with increments diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailAnalyzer.java index 8b7150f7b15..90586037084 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailAnalyzer.java @@ -39,7 +39,7 @@ public class TestUAX29URLEmailAnalyzer extends BaseTokenStreamTestCase { public void testHugeDoc() throws IOException { StringBuilder sb = new StringBuilder(); - char whitespace[] = new char[4094]; + char[] whitespace = new char[4094]; Arrays.fill(whitespace, ' '); sb.append(whitespace); sb.append("testing 1234"); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java index e4dbcd517dd..7e77b67f3c6 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java @@ -81,7 +81,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase { public void testHugeDoc() throws IOException { StringBuilder sb = new StringBuilder(); - char whitespace[] = new char[4094]; + char[] whitespace = new char[4094]; Arrays.fill(whitespace, ' '); sb.append(whitespace); sb.append("testing 1234"); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java index 3394712b0ce..88d3d3200b7 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java @@ -192,7 +192,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase { static void assertCapitalizesTo( Tokenizer tokenizer, - String expected[], + String[] expected, boolean onlyFirstWord, CharArraySet keep, boolean forceFirstLetter, @@ -216,7 +216,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase { static void assertCapitalizesTo( String input, - String expected[], + String[] expected, boolean onlyFirstWord, CharArraySet keep, boolean forceFirstLetter, diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java index 0b1e29baa5f..3a41880f4e2 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java @@ -30,10 +30,10 @@ import 
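Most of the test hunks above build on BaseTokenStreamTestCase, whose core assertion checks a whole token stream against expected terms. A minimal example of the idiom (the analyzer choice and input are illustrative, not from the patch):

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

public class DemoTokenStreamTest extends BaseTokenStreamTestCase {
  public void testSimpleTokens() throws Exception {
    Analyzer a = new WhitespaceAnalyzer();
    // Verifies the produced terms and the TokenStream contract (reset/incrementToken/end/close).
    assertAnalyzesTo(a, "testing 1234", new String[] {"testing", "1234"});
    a.close();
  }
}
```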
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
index 0b1e29baa5f..3a41880f4e2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
@@ -30,10 +30,10 @@ import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.store.Directory;

 public class TestQueryAutoStopWordAnalyzer extends BaseTokenStreamTestCase {
-  String variedFieldValues[] = {
+  String[] variedFieldValues = {
     "the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"
   };
-  String repetitiveFieldValues[] = {"boring", "boring", "vaguelyboring"};
+  String[] repetitiveFieldValues = {"boring", "boring", "vaguelyboring"};
   Directory dir;
   Analyzer appAnalyzer;
   IndexReader reader;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
index 5a597ef56cb..5f3a44c1858 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
@@ -1194,9 +1194,9 @@ public class TestShingleFilter extends BaseTokenStreamTestCase {
   protected void shingleFilterTestCommon(
       ShingleFilter filter, Token[] tokensToCompare, int[] positionIncrements, String[] types)
       throws IOException {
-    String text[] = new String[tokensToCompare.length];
-    int startOffsets[] = new int[tokensToCompare.length];
-    int endOffsets[] = new int[tokensToCompare.length];
+    String[] text = new String[tokensToCompare.length];
+    int[] startOffsets = new int[tokensToCompare.length];
+    int[] endOffsets = new int[tokensToCompare.length];

     for (int i = 0; i < tokensToCompare.length; i++) {
       text[i] = new String(tokensToCompare[i].buffer(), 0, tokensToCompare[i].length());
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
index a399da83717..e448a8cf26f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
@@ -33,7 +33,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getWordInstance(Locale.getDefault());
     CharArrayIterator ci = CharArrayIterator.newWordInstance();
     for (int i = 0; i < 10000; i++) {
-      char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
+      char[] text = TestUtil.randomUnicodeString(random()).toCharArray();
       ci.setText(text, 0, text.length);
       consume(bi, ci);
     }
@@ -45,7 +45,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getWordInstance(Locale.getDefault());
     Segment ci = new Segment();
     for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+      char[] text = _TestUtil.randomUnicodeString(random).toCharArray();
       ci.array = text;
       ci.offset = 0;
       ci.count = text.length;
@@ -63,7 +63,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
     CharArrayIterator ci = CharArrayIterator.newSentenceInstance();
     for (int i = 0; i < 10000; i++) {
-      char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
+      char[] text = TestUtil.randomUnicodeString(random()).toCharArray();
       ci.setText(text, 0, text.length);
       consume(bi, ci);
     }
@@ -75,7 +75,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
     Segment ci = new Segment();
     for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+      char[] text = _TestUtil.randomUnicodeString(random).toCharArray();
       ci.array = text;
       ci.offset = 0;
       ci.count = text.length;
@@ -145,7 +145,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
         });

     // clone()
-    char text[] = "testing".toCharArray();
+    char[] text = "testing".toCharArray();
     ci.setText(text, 0, text.length);
     ci.next();
     CharArrayIterator ci2 = ci.clone();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java
index 8b08681e67a..ffe8863139b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java
@@ -116,7 +116,7 @@ public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase {
   /** Tests terms which span across boundaries */
   public void testHugeDoc() throws IOException {
     StringBuilder sb = new StringBuilder();
-    char whitespace[] = new char[4094];
+    char[] whitespace = new char[4094];
     Arrays.fill(whitespace, '\n');
     sb.append(whitespace);
     sb.append("testing 1234");
@@ -131,10 +131,10 @@ public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase {
       sb.append('a');
     }
     String input = sb.toString();
-    char token[] = new char[1024];
+    char[] token = new char[1024];
     Arrays.fill(token, 'a');
     String expectedToken = new String(token);
-    String expected[] = {
+    String[] expected = {
       expectedToken, expectedToken, expectedToken,
       expectedToken, expectedToken, expectedToken,
       expectedToken, expectedToken, expectedToken,
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
index de32cc4ad7b..98055cd6c66 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
@@ -413,7 +413,7 @@ public class TestWikipediaTokenizer extends BaseTokenStreamTestCase {
     // now check the flags, TODO: add way to check flags from BaseTokenStreamTestCase?
     tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
     tf.setReader(new StringReader(test));
-    int expectedFlags[] =
+    int[] expectedFlags =
         new int[] {
           UNTOKENIZED_TOKEN_FLAG,
           0,
*/ static final class ReplaceableTermAttribute implements Replaceable { - private char buffer[]; + private char[] buffer; private int length; private CharTermAttribute token; @@ -135,7 +135,7 @@ public final class ICUTransformFilter extends TokenFilter { @Override public void copy(int start, int limit, int dest) { - char text[] = new char[limit - start]; + char[] text = new char[limit - start]; getChars(start, limit, text, 0); replace(dest, dest, text, 0, limit - start); } diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/BreakIteratorWrapper.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/BreakIteratorWrapper.java index bbbc8a925eb..ea8ebef709b 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/BreakIteratorWrapper.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/BreakIteratorWrapper.java @@ -30,7 +30,7 @@ import com.ibm.icu.text.UnicodeSet; final class BreakIteratorWrapper { private final CharArrayIterator textIterator = new CharArrayIterator(); private final RuleBasedBreakIterator rbbi; - private char text[]; + private char[] text; private int start; private int status; @@ -90,7 +90,7 @@ final class BreakIteratorWrapper { return false; } - void setText(char text[], int start, int length) { + void setText(char[] text, int start, int length) { this.text = text; this.start = start; textIterator.setText(text, start, length); diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java index 3f8af81deac..d0c6fe2a655 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java @@ -24,7 +24,7 @@ import java.text.CharacterIterator; * @lucene.experimental */ final class CharArrayIterator implements CharacterIterator { - private char array[]; + private char[] array; private int start; private int index; private int length; @@ -49,7 +49,7 @@ final class CharArrayIterator implements CharacterIterator { * @param start offset into buffer * @param length maximum length to examine */ - void setText(final char array[], int start, int length) { + void setText(final char[] array, int start, int length) { this.array = array; this.start = start; this.index = start; diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java index 8759eadf289..502004e0b26 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java @@ -39,13 +39,13 @@ import com.ibm.icu.text.BreakIterator; */ final class CompositeBreakIterator { private final ICUTokenizerConfig config; - private final BreakIteratorWrapper wordBreakers[] = + private final BreakIteratorWrapper[] wordBreakers = new BreakIteratorWrapper[1 + UCharacter.getIntPropertyMaxValue(UProperty.SCRIPT)]; private BreakIteratorWrapper rbbi; private final ScriptIterator scriptIterator; - private char text[]; + private char[] text; CompositeBreakIterator(ICUTokenizerConfig config) { this.config = config; @@ -111,7 +111,7 @@ final class 
CompositeBreakIterator { * @param start offset into buffer * @param length maximum length to examine */ - void setText(final char text[], int start, int length) { + void setText(final char[] text, int start, int length) { this.text = text; scriptIterator.setText(text, start, length); if (scriptIterator.next()) { diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizer.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizer.java index 5e35f6af24e..41918d17ee4 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizer.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizer.java @@ -39,7 +39,7 @@ import org.apache.lucene.util.AttributeFactory; */ public final class ICUTokenizer extends Tokenizer { private static final int IOBUFFER = 4096; - private final char buffer[] = new char[IOBUFFER]; + private final char[] buffer = new char[IOBUFFER]; /** true length of text in the buffer */ private int length = 0; /** length in buffer that can be evaluated safely, up to a safe end point */ diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java index bd79767b1b4..1f5d72e17de 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java @@ -111,7 +111,7 @@ public class ICUTokenizerFactory extends TokenizerFactory implements ResourceLoa if (tailored.isEmpty()) { config = new DefaultICUTokenizerConfig(cjkAsWords, myanmarAsWords); } else { - final BreakIterator breakers[] = + final BreakIterator[] breakers = new BreakIterator[1 + UCharacter.getIntPropertyMaxValue(UProperty.SCRIPT)]; for (Map.Entry entry : tailored.entrySet()) { int code = entry.getKey(); diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java index 4cbd0d5f06e..52a4aa436ae 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java @@ -51,7 +51,7 @@ import com.ibm.icu.text.UTF16; * @lucene.experimental */ final class ScriptIterator { - private char text[]; + private char[] text; private int start; private int limit; private int index; @@ -150,7 +150,7 @@ final class ScriptIterator { * @param start offset into buffer * @param length maximum length to examine */ - void setText(char text[], int start, int length) { + void setText(char[] text, int start, int length) { this.text = text; this.start = start; this.index = start; @@ -161,7 +161,7 @@ final class ScriptIterator { } /** linear fast-path for basic latin case */ - private static final int basicLatin[] = new int[128]; + private static final int[] basicLatin = new int[128]; static { for (int i = 0; i < basicLatin.length; i++) { diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java index 49beec82b9b..4a0057f20f2 100644 --- 
a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java +++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java @@ -92,7 +92,7 @@ public class TestCharArrayIterator extends LuceneTestCase { } public void testClone() { - char text[] = "testing".toCharArray(); + char[] text = "testing".toCharArray(); CharArrayIterator ci = new CharArrayIterator(); ci.setText(text, 0, text.length); ci.next(); diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java index 1dd5ca0de3a..bd9403a0b10 100644 --- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java +++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java @@ -33,7 +33,7 @@ public class TestICUTokenizer extends BaseTokenStreamTestCase { public void testHugeDoc() throws IOException { StringBuilder sb = new StringBuilder(); - char whitespace[] = new char[4094]; + char[] whitespace = new char[4094]; Arrays.fill(whitespace, ' '); sb.append(whitespace); sb.append("testing 1234"); @@ -53,10 +53,10 @@ public class TestICUTokenizer extends BaseTokenStreamTestCase { ICUTokenizer tokenizer = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false, true)); tokenizer.setReader(new StringReader(input)); - char token[] = new char[4096]; + char[] token = new char[4096]; Arrays.fill(token, 'a'); String expectedToken = new String(token); - String expected[] = { + String[] expected = { expectedToken, expectedToken, expectedToken, expectedToken, expectedToken, expectedToken, expectedToken, expectedToken, expectedToken, @@ -547,7 +547,7 @@ public class TestICUTokenizer extends BaseTokenStreamTestCase { public void testICUConcurrency() throws Exception { int numThreads = 8; final CountDownLatch startingGun = new CountDownLatch(1); - Thread threads[] = new Thread[numThreads]; + Thread[] threads = new Thread[numThreads]; for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java index 9bf46a0aae7..c48ed42d5a7 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java @@ -71,7 +71,7 @@ public class GenerateUTR30DataFiles { private static final Pattern NUMERIC_VALUE_PATTERN = Pattern.compile("Numeric[-\\s_]*Value", Pattern.CASE_INSENSITIVE); - public static void main(String args[]) { + public static void main(String[] args) { try { if (args.length != 1) { throw new IllegalArgumentException( diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java index 7797ae84485..a1ce01a57cc 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java @@ -88,7 +88,7 @@ public class RBBIRuleCompiler { } } - public static void main(String args[]) throws Exception { + public static void main(String[] args) throws Exception { if 
(args.length < 2) { System.err.println("Usage: RBBIRuleComputer "); System.exit(1); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java index 6d1dff3c1fa..0ad2b6e9cbd 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java @@ -82,7 +82,7 @@ public class JapaneseAnalyzer extends StopwordAnalyzerBase { loadStopwordSet(false, JapaneseAnalyzer.class, "stoptags.txt", "#"); DEFAULT_STOP_TAGS = new HashSet<>(); for (Object element : tagset) { - char chars[] = (char[]) element; + char[] chars = (char[]) element; DEFAULT_STOP_TAGS.add(new String(chars)); } } catch (IOException ex) { diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java index 503ad934983..3e9b981258f 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java @@ -75,7 +75,7 @@ public class JapanesePartOfSpeechStopFilterFactory extends TokenFilterFactory if (cas != null) { stopTags = new HashSet<>(); for (Object element : cas) { - char chars[] = (char[]) element; + char[] chars = (char[]) element; stopTags.add(new String(chars)); } } diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java index 73fa7f3f786..ccc4be3ac70 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java @@ -234,13 +234,13 @@ public abstract class BinaryDictionary implements Dictionary { } @Override - public String getBaseForm(int wordId, char surfaceForm[], int off, int len) { + public String getBaseForm(int wordId, char[] surfaceForm, int off, int len) { if (hasBaseFormData(wordId)) { int offset = baseFormOffset(wordId); int data = buffer.get(offset++) & 0xff; int prefix = data >>> 4; int suffix = data & 0xF; - char text[] = new char[prefix + suffix]; + char[] text = new char[prefix + suffix]; System.arraycopy(surfaceForm, off, text, 0, prefix); for (int i = 0; i < suffix; i++) { text[prefix + i] = buffer.getChar(offset + (i << 1)); @@ -252,14 +252,14 @@ public abstract class BinaryDictionary implements Dictionary { } @Override - public String getReading(int wordId, char surface[], int off, int len) { + public String getReading(int wordId, char[] surface, int off, int len) { if (hasReadingData(wordId)) { int offset = readingOffset(wordId); int readingData = buffer.get(offset++) & 0xff; return readString(offset, readingData >>> 1, (readingData & 1) == 1); } else { // the reading is the surface form, with hiragana shifted to katakana - char text[] = new char[len]; + char[] text = new char[len]; for (int i = 0; i < len; i++) { char ch = surface[off + i]; if (ch > 0x3040 && ch < 0x3097) { @@ -278,7 +278,7 @@ public abstract class BinaryDictionary implements Dictionary { } @Override - public String getPronunciation(int wordId, char surface[], int off, int len) { + public String 
getPronunciation(int wordId, char[] surface, int off, int len) { if (hasPronunciationData(wordId)) { int offset = pronunciationOffset(wordId); int pronunciationData = buffer.get(offset++) & 0xff; @@ -341,7 +341,7 @@ public abstract class BinaryDictionary implements Dictionary { } private String readString(int offset, int length, boolean kana) { - char text[] = new char[length]; + char[] text = new char[length]; if (kana) { for (int i = 0; i < length; i++) { text[i] = (char) (0x30A0 + (buffer.get(offset + i) & 0xff)); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java index f56daac17fc..cfe11b30cd9 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java @@ -56,7 +56,7 @@ public interface Dictionary { * @param wordId word ID of token * @return Reading of the token */ - public String getReading(int wordId, char surface[], int off, int len); + public String getReading(int wordId, char[] surface, int off, int len); /** * Get base form of word @@ -64,7 +64,7 @@ public interface Dictionary { * @param wordId word ID of token * @return Base form (only different for inflected words, otherwise null) */ - public String getBaseForm(int wordId, char surface[], int off, int len); + public String getBaseForm(int wordId, char[] surface, int off, int len); /** * Get pronunciation of tokens @@ -72,7 +72,7 @@ public interface Dictionary { * @param wordId word ID of token * @return Pronunciation of the token */ - public String getPronunciation(int wordId, char surface[], int off, int len); + public String getPronunciation(int wordId, char[] surface, int off, int len); /** * Get inflection type of tokens diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java index 9704121f3ea..2c6a5c6ecad 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java @@ -34,7 +34,7 @@ public final class TokenInfoFST { // false: 191 arcs // true: 28,607 arcs (costs ~1.5MB) private final int cacheCeiling; - private final FST.Arc rootCache[]; + private final FST.Arc[] rootCache; public final Long NO_OUTPUT; @@ -47,7 +47,7 @@ public final class TokenInfoFST { @SuppressWarnings({"rawtypes", "unchecked"}) private FST.Arc[] cacheRootArcs() throws IOException { - FST.Arc rootCache[] = new FST.Arc[1 + (cacheCeiling - 0x3040)]; + FST.Arc[] rootCache = new FST.Arc[1 + (cacheCeiling - 0x3040)]; FST.Arc firstArc = new FST.Arc<>(); fst.getFirstArc(firstArc); FST.Arc arc = new FST.Arc<>(); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java index 3c449efdd26..2463dbae06e 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java @@ -61,7 +61,7 @@ public final class UnknownDictionary extends BinaryDictionary { } @Override - public String getReading(int wordId, char surface[], int off, int len) { + public String getReading(int wordId, 
char[] surface, int off, int len) { return null; } diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java index 1e1d76687f6..859806e5ef0 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java @@ -38,10 +38,10 @@ public final class UserDictionary implements Dictionary { private final TokenInfoFST fst; // holds wordid, length, length... indexed by phrase ID - private final int segmentations[][]; + private final int[][] segmentations; // holds readings and POS, indexed by wordid - private final String data[]; + private final String[] data; private static final int CUSTOM_DICTIONARY_WORD_ID_OFFSET = 100000000; @@ -239,7 +239,7 @@ public final class UserDictionary implements Dictionary { } @Override - public String getReading(int wordId, char surface[], int off, int len) { + public String getReading(int wordId, char[] surface, int off, int len) { return getFeature(wordId, 0); } @@ -249,12 +249,12 @@ public final class UserDictionary implements Dictionary { } @Override - public String getBaseForm(int wordId, char surface[], int off, int len) { + public String getBaseForm(int wordId, char[] surface, int off, int len) { return null; // TODO: add support? } @Override - public String getPronunciation(int wordId, char surface[], int off, int len) { + public String getPronunciation(int wordId, char[] surface, int off, int len) { return null; // TODO: add support? } diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java index 74da91444d2..648bb403ceb 100644 --- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java +++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java @@ -142,7 +142,7 @@ public class MorfologikFilter extends TokenFilter { scratch.setLength(length); scratch.grow(length); - char buffer[] = scratch.chars(); + char[] buffer = scratch.chars(); for (int i = 0; i < length; ) { i += Character.toChars(Character.toLowerCase(Character.codePointAt(chs, i)), buffer, i); } diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java index 182c6d178c3..e740cc32fcd 100644 --- a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java +++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java @@ -112,7 +112,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws IOException { - Encoder encoders[] = + Encoder[] encoders = new Encoder[] { new Metaphone(), new DoubleMetaphone(), @@ -149,7 +149,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase { } public void testEmptyTerm() throws IOException { - Encoder encoders[] = + Encoder[] encoders = new Encoder[] { new Metaphone(), new DoubleMetaphone(), diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java 
b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java index 63f1a83bc06..28a98502d25 100644 --- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java +++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java @@ -140,7 +140,7 @@ abstract class AbstractDictionary { * @param carray character array * @return hashcode */ - public long hash1(char carray[]) { + public long hash1(char[] carray) { final long p = 1099511628211L; long hash = 0xcbf29ce484222325L; for (int i = 0; i < carray.length; i++) { @@ -185,7 +185,7 @@ abstract class AbstractDictionary { * @param carray character array * @return hashcode */ - public int hash2(char carray[]) { + public int hash2(char[] carray) { int hash = 5381; /* hash 33 + c */ diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java index f0149d4978b..b06eefee83b 100644 --- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java +++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java @@ -181,7 +181,7 @@ class BigramDictionary extends AbstractDictionary { if (i != 3755 + GB2312_FIRST_CHAR) { tmpword = currentStr + tmpword; } - char carray[] = tmpword.toCharArray(); + char[] carray = tmpword.toCharArray(); long hashId = hash1(carray); int index = getAvaliableIndex(hashId, carray); if (index != -1) { @@ -199,7 +199,7 @@ class BigramDictionary extends AbstractDictionary { // log.info("load dictionary done! " + dctFilePath + " total:" + total); } - private int getAvaliableIndex(long hashId, char carray[]) { + private int getAvaliableIndex(long hashId, char[] carray) { int hash1 = (int) (hashId % PRIME_BIGRAM_LENGTH); int hash2 = hash2(carray) % PRIME_BIGRAM_LENGTH; if (hash1 < 0) hash1 = PRIME_BIGRAM_LENGTH + hash1; @@ -223,7 +223,7 @@ class BigramDictionary extends AbstractDictionary { /* * lookup the index into the frequency array. 
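* (Both hash functions are shown above: hash1 is a 64-bit FNV-1-style hash -- prime 1099511628211,
* offset basis 0xcbf29ce484222325 -- and hash2 is a djb2-style hash, h = h * 33 + c. Reducing two
* independent hashes of the same char[] modulo PRIME_BIGRAM_LENGTH looks like a classic
* double-hashing arrangement; the probe pair is computed as in getAvaliableIndex above:
*   int h1 = (int) (hash1(carray) % PRIME_BIGRAM_LENGTH);   // primary slot
*   int h2 = hash2(carray) % PRIME_BIGRAM_LENGTH;           // secondary probe
* with each value normalized to a non-negative index before use.)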
*/ - private int getBigramItemIndex(char carray[]) { + private int getBigramItemIndex(char[] carray) { long hashId = hash1(carray); int hash1 = (int) (hashId % PRIME_BIGRAM_LENGTH); int hash2 = hash2(carray) % PRIME_BIGRAM_LENGTH; diff --git a/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java b/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java index 447aa733e81..79a89b0f820 100644 --- a/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java +++ b/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java @@ -28,7 +28,7 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { public void testChineseStopWordsDefault() throws Exception { Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */ String sentence = "我购买了道具和服装。"; - String result[] = {"我", "购买", "了", "道具", "和", "服装"}; + String[] result = {"我", "购买", "了", "道具", "和", "服装"}; assertAnalyzesTo(ca, sentence, result); ca.close(); // set stop-words from the outer world - must yield same behavior @@ -44,7 +44,7 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { public void testChineseStopWordsDefaultTwoPhrases() throws Exception { Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */ String sentence = "我购买了道具和服装。 我购买了道具和服装。"; - String result[] = {"我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装"}; + String[] result = {"我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装"}; assertAnalyzesTo(ca, sentence, result); ca.close(); } @@ -65,7 +65,7 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { "\uD872\uDF2D", "\u9FD4") .collect(Collectors.joining()); - String result[] = { + String[] result = { "\uD872\uDF3B", "\uD872\uDF4A", "\uD872\uDF73", @@ -86,7 +86,7 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { public void testChineseStopWordsDefaultTwoPhrasesIdeoSpace() throws Exception { Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */ String sentence = "我购买了道具和服装 我购买了道具和服装。"; - String result[] = {"我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装"}; + String[] result = {"我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装"}; assertAnalyzesTo(ca, sentence, result); ca.close(); } @@ -104,7 +104,7 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { new SmartChineseAnalyzer(null) /* sets stopwords to empty set */ }; String sentence = "我购买了道具和服装。"; - String result[] = {"我", "购买", "了", "道具", "和", "服装", ","}; + String[] result = {"我", "购买", "了", "道具", "和", "服装", ","}; for (Analyzer analyzer : analyzers) { assertAnalyzesTo(analyzer, sentence, result); assertAnalyzesTo(analyzer, sentence, result); @@ -119,10 +119,10 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { public void testChineseStopWords2() throws Exception { Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */ String sentence = "Title:San"; // : is a stopword - String result[] = {"titl", "san"}; - int startOffsets[] = {0, 6}; - int endOffsets[] = {5, 9}; - int posIncr[] = {1, 2}; + String[] result = {"titl", "san"}; + int[] startOffsets = {0, 6}; + int[] endOffsets = {5, 9}; + int[] posIncr = {1, 2}; assertAnalyzesTo(ca, sentence, result, startOffsets, endOffsets, posIncr); ca.close(); } diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Compile.java 
b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Compile.java index 1d9ebb014e8..b6d1afe9639 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Compile.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Compile.java @@ -107,7 +107,7 @@ public class Compile { String charset = System.getProperty("egothor.stemmer.charset", "UTF-8"); - char optimizer[] = new char[args[0].length() - qq]; + char[] optimizer = new char[args[0].length() - qq]; for (int i = 0; i < optimizer.length; i++) { optimizer[i] = args[0].charAt(qq + i); } diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Diff.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Diff.java index f8e345d8aec..9f12b76c6db 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Diff.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Diff.java @@ -65,8 +65,8 @@ package org.egothor.stemmer; public class Diff { int sizex = 0; int sizey = 0; - int net[][]; - int way[][]; + int[][] net; + int[][] way; int INSERT; int DELETE; @@ -167,7 +167,7 @@ public class Diff { int y; int maxx; int maxy; - int go[] = new int[4]; + int[] go = new int[4]; final int X = 1; final int Y = 2; final int R = 3; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java index 4d4618317bd..8b2144d56f1 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java @@ -78,7 +78,7 @@ public class Gener extends Reduce { List cmds = orig.cmds; List rows = new ArrayList<>(); List orows = orig.rows; - int remap[] = new int[orows.size()]; + int[] remap = new int[orows.size()]; Arrays.fill(remap, 1); for (int j = orows.size() - 1; j >= 0; j--) { @@ -101,7 +101,7 @@ public class Gener extends Reduce { * @return true if the Row should remain, false * otherwise */ - public boolean eat(Row in, int remap[]) { + public boolean eat(Row in, int[] remap) { int sum = 0; for (Iterator i = in.cells.values().iterator(); i.hasNext(); ) { Cell c = i.next(); diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java index afe8b720ff2..3e7ee057734 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java @@ -89,7 +89,7 @@ public class Lift extends Reduce { List cmds = orig.cmds; List rows = new ArrayList<>(); List orows = orig.rows; - int remap[] = new int[orows.size()]; + int[] remap = new int[orows.size()]; for (int j = orows.size() - 1; j >= 0; j--) { liftUp(orows.get(j), orows); diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java index 3f706005323..21f432fb7cf 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java @@ -98,7 +98,7 @@ public class MultiTrie2 extends MultiTrie { StringBuilder result = new StringBuilder(tries.size() * 2); try { CharSequence lastkey = key; - CharSequence p[] = new CharSequence[tries.size()]; + CharSequence[] p = new CharSequence[tries.size()]; char lastch = ' '; for (int i = 0; i < tries.size(); i++) { CharSequence r = tries.get(i).getFully(lastkey); @@ -142,7 +142,7 @@ public class MultiTrie2 extends MultiTrie { StringBuilder result = new 
StringBuilder(tries.size() * 2); try { CharSequence lastkey = key; - CharSequence p[] = new CharSequence[tries.size()]; + CharSequence[] p = new CharSequence[tries.size()]; char lastch = ' '; for (int i = 0; i < tries.size(); i++) { CharSequence r = tries.get(i).getLastOnPath(lastkey); @@ -201,7 +201,7 @@ public class MultiTrie2 extends MultiTrie { return; } // System.err.println( cmd ); - CharSequence p[] = decompose(cmd); + CharSequence[] p = decompose(cmd); int levels = p.length; // System.err.println("levels "+key+" cmd "+cmd+"|"+levels); while (levels >= tries.size()) { @@ -255,7 +255,7 @@ public class MultiTrie2 extends MultiTrie { } } - CharSequence part[] = new CharSequence[parts]; + CharSequence[] part = new CharSequence[parts]; int x = 0; for (int i = 0; 0 <= i && i < cmd.length(); ) { diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java index 4d6cedbbe10..f22d658039d 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java @@ -79,7 +79,7 @@ public class Optimizer extends Reduce { List cmds = orig.cmds; List rows = new ArrayList<>(); List orows = orig.rows; - int remap[] = new int[orows.size()]; + int[] remap = new int[orows.size()]; for (int j = orows.size() - 1; j >= 0; j--) { Row now = new Remap(orows.get(j), remap); diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java index 783f1198bef..da6a3c09f7a 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java @@ -75,7 +75,7 @@ public class Reduce { List cmds = orig.cmds; List rows = new ArrayList<>(); List orows = orig.rows; - int remap[] = new int[orows.size()]; + int[] remap = new int[orows.size()]; Arrays.fill(remap, -1); rows = removeGaps(orig.root, rows, new ArrayList(), remap); @@ -83,7 +83,7 @@ public class Reduce { return new Trie(orig.forward, remap[orig.root], cmds, rows); } - List removeGaps(int ind, List old, List to, int remap[]) { + List removeGaps(int ind, List old, List to, int[] remap) { remap[ind] = to.size(); Row now = old.get(ind); @@ -107,7 +107,7 @@ public class Reduce { * @param old Description of the Parameter * @param remap Description of the Parameter */ - public Remap(Row old, int remap[]) { + public Remap(Row old, int[] remap) { super(); Iterator i = old.cells.keySet().iterator(); for (; i.hasNext(); ) { diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java index f09656e27fb..8251e4c4b78 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java @@ -126,7 +126,7 @@ public class Trie { * @return The all value */ public CharSequence[] getAll(CharSequence key) { - int res[] = new int[key.length()]; + int[] res = new int[key.length()]; int resc = 0; Row now = getRow(root); int w; @@ -175,7 +175,7 @@ public class Trie { if (resc < 1) { return null; } - CharSequence R[] = new CharSequence[resc]; + CharSequence[] R = new CharSequence[resc]; for (int j = 0; j < resc; j++) { R[j] = cmds.get(res[j]); } diff --git a/lucene/analysis/stempel/src/test/org/egothor/stemmer/TestStemmer.java b/lucene/analysis/stempel/src/test/org/egothor/stemmer/TestStemmer.java index 
481fdfd2b3c..b0d06c4d27f 100644 --- a/lucene/analysis/stempel/src/test/org/egothor/stemmer/TestStemmer.java +++ b/lucene/analysis/stempel/src/test/org/egothor/stemmer/TestStemmer.java @@ -62,8 +62,8 @@ public class TestStemmer extends LuceneTestCase { public void testTrie() { Trie t = new Trie(true); - String keys[] = {"a", "ba", "bb", "c"}; - String vals[] = {"1", "2", "2", "4"}; + String[] keys = {"a", "ba", "bb", "c"}; + String[] vals = {"1", "2", "2", "4"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -78,8 +78,8 @@ public class TestStemmer extends LuceneTestCase { public void testTrieBackwards() { Trie t = new Trie(false); - String keys[] = {"a", "ba", "bb", "c"}; - String vals[] = {"1", "2", "2", "4"}; + String[] keys = {"a", "ba", "bb", "c"}; + String[] vals = {"1", "2", "2", "4"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -91,8 +91,8 @@ public class TestStemmer extends LuceneTestCase { public void testMultiTrie() { Trie t = new MultiTrie(true); - String keys[] = {"a", "ba", "bb", "c"}; - String vals[] = {"1", "2", "2", "4"}; + String[] keys = {"a", "ba", "bb", "c"}; + String[] vals = {"1", "2", "2", "4"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -104,8 +104,8 @@ public class TestStemmer extends LuceneTestCase { public void testMultiTrieBackwards() { Trie t = new MultiTrie(false); - String keys[] = {"a", "ba", "bb", "c"}; - String vals[] = {"1", "2", "2", "4"}; + String[] keys = {"a", "ba", "bb", "c"}; + String[] vals = {"1", "2", "2", "4"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -117,12 +117,12 @@ public class TestStemmer extends LuceneTestCase { public void testMultiTrie2() { Trie t = new MultiTrie2(true); - String keys[] = {"a", "ba", "bb", "c"}; + String[] keys = {"a", "ba", "bb", "c"}; /* * short vals won't work, see line 155 for example * the IOOBE is caught (wierd), but shouldnt affect patch cmds? */ - String vals[] = {"1111", "2222", "2223", "4444"}; + String[] vals = {"1111", "2222", "2223", "4444"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -134,12 +134,12 @@ public class TestStemmer extends LuceneTestCase { public void testMultiTrie2Backwards() { Trie t = new MultiTrie2(false); - String keys[] = {"a", "ba", "bb", "c"}; + String[] keys = {"a", "ba", "bb", "c"}; /* * short vals won't work, see line 155 for example * the IOOBE is caught (wierd), but shouldnt affect patch cmds? 
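* (MultiTrie2 seems to distribute each patch command across its internal tries two characters at a
* time -- decompose(), shown earlier in this patch, walks cmd in command/parameter pairs -- so the
* four-char vals below give every trie level a complete pair, while a one-char value would leave a
* dangling half-pair, presumably the source of the IOOBE noted above.)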
*/ - String vals[] = {"1111", "2222", "2223", "4444"}; + String[] vals = {"1111", "2222", "2223", "4444"}; for (int i = 0; i < keys.length; i++) { t.add(keys[i], vals[i]); @@ -148,7 +148,7 @@ public class TestStemmer extends LuceneTestCase { assertTrieContents(t, keys, vals); } - private static void assertTrieContents(Trie trie, String keys[], String vals[]) { + private static void assertTrieContents(Trie trie, String[] keys, String[] vals) { Trie[] tries = new Trie[] { trie, diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50LiveDocsFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50LiveDocsFormat.java index 5b1571a704c..b0187975742 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50LiveDocsFormat.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50LiveDocsFormat.java @@ -101,7 +101,7 @@ public final class Lucene50LiveDocsFormat extends LiveDocsFormat { } private FixedBitSet readFixedBitSet(IndexInput input, int length) throws IOException { - long data[] = new long[FixedBitSet.bits2words(length)]; + long[] data = new long[FixedBitSet.bits2words(length)]; for (int i = 0; i < data.length; i++) { data[i] = input.readLong(); } diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipReader.java index 5824c36a448..a74eacdd21d 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipReader.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipReader.java @@ -52,11 +52,11 @@ import org.apache.lucene.store.IndexInput; */ class Lucene50SkipReader extends MultiLevelSkipListReader { private final int version; - private long docPointer[]; - private long posPointer[]; - private long payPointer[]; - private int posBufferUpto[]; - private int payloadByteUpto[]; + private long[] docPointer; + private long[] posPointer; + private long[] payPointer; + private int[] posBufferUpto; + private int[] payloadByteUpto; private long lastPosPointer; private long lastPayPointer; diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84SkipReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84SkipReader.java index 8b40d4acd4e..78b49e28195 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84SkipReader.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84SkipReader.java @@ -49,11 +49,11 @@ import org.apache.lucene.store.IndexInput; *
    Therefore, we'll trim df before passing it to the interface. see trim(int) */ class Lucene84SkipReader extends MultiLevelSkipListReader { - private long docPointer[]; - private long posPointer[]; - private long payPointer[]; - private int posBufferUpto[]; - private int payloadByteUpto[]; + private long[] docPointer; + private long[] posPointer; + private long[] payPointer; + private int[] posBufferUpto; + private int[] payloadByteUpto; private long lastPosPointer; private long lastPayPointer; diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java index 320fc2b1c1e..124bd7feacb 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java @@ -172,6 +172,6 @@ public final class LegacyDirectWriter { return roundBits(PackedInts.unsignedBitsRequired(maxValue)); } - static final int SUPPORTED_BITS_PER_VALUE[] = + static final int[] SUPPORTED_BITS_PER_VALUE = new int[] {1, 2, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64}; } diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestBlockPostingsFormat3.java index db979191bd6..71b3c82a397 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestBlockPostingsFormat3.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestBlockPostingsFormat3.java @@ -235,7 +235,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { } } else if (code == 2) { // term, but ensure a non-zero offset - byte newbytes[] = new byte[term.length + 5]; + byte[] newbytes = new byte[term.length + 5]; System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length); tests.add(new BytesRef(newbytes, 5, term.length)); } diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/TestLucene70DocValuesFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/TestLucene70DocValuesFormat.java index 47c41be0beb..894b2c151f4 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/TestLucene70DocValuesFormat.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/TestLucene70DocValuesFormat.java @@ -596,7 +596,7 @@ public class TestLucene70DocValuesFormat extends BaseCompressingDocValuesFormatT Document doc = new Document(); int valueCount = (int) counts.getAsLong(); - long valueArray[] = new long[valueCount]; + long[] valueArray = new long[valueCount]; for (int j = 0; j < valueCount; j++) { long value = values.getAsLong(); valueArray[j] = value; @@ -625,11 +625,11 @@ public class TestLucene70DocValuesFormat extends BaseCompressingDocValuesFormatT if (i > docValues.docID()) { docValues.nextDoc(); } - String expected[] = r.document(i).getValues("stored"); + String[] expected = r.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expected.length); } else { - String actual[] = new String[docValues.docValueCount()]; + String[] actual = new String[docValues.docValueCount()]; for (int j = 0; j < actual.length; j++) { actual[j] = Long.toString(docValues.nextValue()); } diff --git 
a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java index 0473375732d..5ae55493f70 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java @@ -645,7 +645,7 @@ public abstract class BaseLucene80DocValuesFormatTestCase Document doc = new Document(); int valueCount = (int) counts.getAsLong(); - long valueArray[] = new long[valueCount]; + long[] valueArray = new long[valueCount]; for (int j = 0; j < valueCount; j++) { long value = values.getAsLong(); valueArray[j] = value; @@ -675,12 +675,12 @@ public abstract class BaseLucene80DocValuesFormatTestCase if (i > docValues.docID()) { docValues.nextDoc(); } - String expectedStored[] = r.document(i).getValues("stored"); + String[] expectedStored = r.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expectedStored.length); } else { long[] readValueArray = new long[docValues.docValueCount()]; - String actualDocValue[] = new String[docValues.docValueCount()]; + String[] actualDocValue = new String[docValues.docValueCount()]; for (int j = 0; j < docValues.docValueCount(); ++j) { long actualDV = docValues.nextValue(); readValueArray[j] = actualDV; diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/packed/TestLegacyDirectPacked.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/packed/TestLegacyDirectPacked.java index eb71a8b5201..dca7293a620 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/packed/TestLegacyDirectPacked.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/packed/TestLegacyDirectPacked.java @@ -97,7 +97,7 @@ public class TestLegacyDirectPacked extends LuceneTestCase { MyRandom random = new MyRandom(random().nextLong()); int numIters = TEST_NIGHTLY ? 100 : 10; for (int i = 0; i < numIters; i++) { - long original[] = randomLongs(random, bpv); + long[] original = randomLongs(random, bpv); int bitsRequired = bpv == 64 ? 64 : LegacyDirectWriter.bitsRequired(1L << (bpv - 1)); String name = "bpv" + bpv + "_" + i; IndexOutput output = EndiannessReverserUtil.createOutput(directory, name, IOContext.DEFAULT); @@ -124,7 +124,7 @@ public class TestLegacyDirectPacked extends LuceneTestCase { private long[] randomLongs(MyRandom random, int bpv) { int amount = random.nextInt(5000); - long longs[] = new long[amount]; + long[] longs = new long[amount]; for (int i = 0; i < longs.length; i++) { longs[i] = random.nextLong(bpv); } @@ -133,7 +133,7 @@ public class TestLegacyDirectPacked extends LuceneTestCase { // java.util.Random only returns 48bits of randomness in nextLong... 
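// (java.util.Random keeps only 48 bits of seed state, so nextLong() provably cannot return all
// 2^64 long values. The subclass below works around that by drawing 8 independent random bytes
// and reading them back as a full-width long; a sketch of the presumed mechanics, using the
// buffer/input fields declared below:
//   nextBytes(buffer);         // 8 bytes, each drawn from fresh PRNG state
//   input.reset(buffer);
//   return input.readLong();   // 64 random bits, masked down to bpv bits by nextLong(bpv))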
static class MyRandom extends Random { - byte buffer[] = new byte[8]; + byte[] buffer = new byte[8]; ByteArrayDataInput input = new ByteArrayDataInput(); MyRandom(long seed) { diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java index 4b1c4057548..3b5eaf1b2c0 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java @@ -1142,7 +1142,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { assertEquals(i, dvByte.nextDoc()); assertEquals(id, dvByte.longValue()); - byte bytes[] = + byte[] bytes = new byte[] {(byte) (id >>> 24), (byte) (id >>> 16), (byte) (id >>> 8), (byte) id}; BytesRef expectedRef = new BytesRef(bytes); @@ -1426,7 +1426,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { // add docvalues fields doc.add(new NumericDocValuesField("dvByte", (byte) id)); - byte bytes[] = + byte[] bytes = new byte[] {(byte) (id >>> 24), (byte) (id >>> 16), (byte) (id >>> 8), (byte) id}; BytesRef ref = new BytesRef(bytes); doc.add(new BinaryDocValuesField("dvBytesDerefFixed", ref)); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java index 66dc1b3a184..790f8647682 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java @@ -33,7 +33,7 @@ public class SimpleSloppyPhraseQueryMaker extends SimpleQueryMaker { @Override protected Query[] prepareQueries() throws Exception { // extract some 100 words from doc text to an array - String words[]; + String[] words; ArrayList w = new ArrayList<>(); StringTokenizer st = new StringTokenizer(SingleDocSource.DOC_TEXT); while (st.hasMoreTokens() && w.size() < 100) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java index f1e79654862..5993b1950c9 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java @@ -67,7 +67,7 @@ public class TrecContentSource extends ContentSource { /** separator between lines in the byffer */ public static final String NEW_LINE = System.getProperty("line.separator"); - private static final String DATE_FORMATS[] = { + private static final String[] DATE_FORMATS = { "EEE, dd MMM yyyy kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT "EEE MMM dd kk:mm:ss yyyy z", // Tue Dec 09 16:45:08 2003 EST "EEE, dd-MMM-':'y kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java index 4114364e4d0..23e21d997e2 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java @@ -124,7 +124,7 @@ public abstract class 
TrecDocParser { * @return text of interest or null if not found */ public static String extract( - StringBuilder buf, String startTag, String endTag, int maxPos, String noisePrefixes[]) { + StringBuilder buf, String startTag, String endTag, int maxPos, String[] noisePrefixes) { int k1 = buf.indexOf(startTag); if (k1 >= 0 && (maxPos < 0 || k1 < maxPos)) { k1 += startTag.length(); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java index c7462cb1e4b..a39f9742b98 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java @@ -64,7 +64,7 @@ public class AddIndexesTask extends PerfTask { writer.addIndexes(inputDir); } else { try (IndexReader r = DirectoryReader.open(inputDir)) { - CodecReader leaves[] = new CodecReader[r.leaves().size()]; + CodecReader[] leaves = new CodecReader[r.leaves().size()]; int i = 0; for (LeafReaderContext leaf : r.leaves()) { leaves[i++] = SlowCodecReaderWrapper.wrap(leaf.reader()); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java index 532187b2b41..fa998ff16ac 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java @@ -186,7 +186,7 @@ public abstract class PerfTask implements Cloneable { // compute a blank string padding for printing this task indented by its depth String getPadding() { - char c[] = new char[4 * getDepth()]; + char[] c = new char[4 * getDepth()]; for (int i = 0; i < c.length; i++) c[i] = ' '; return new String(c); } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReportTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReportTask.java index fe8e3c92558..4d379fa1e0b 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReportTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReportTask.java @@ -62,7 +62,7 @@ public abstract class ReportTask extends PerfTask { protected static final String ELAPSED = " elapsedSec"; protected static final String USEDMEM = " avgUsedMem"; protected static final String TOTMEM = " avgTotalMem"; - protected static final String COLS[] = {RUNCNT, RECCNT, RECSEC, ELAPSED, USEDMEM, TOTMEM}; + protected static final String[] COLS = {RUNCNT, RECCNT, RECSEC, ELAPSED, USEDMEM, TOTMEM}; /** * Compute a title line for a report table diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java index bb049d490ff..0292b954b7f 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java @@ -340,7 +340,7 @@ public class TaskSequence extends PerfTask { final TaskStats stats = getRunData().getPoints().getCurrentStats(); initTasksArray(); - ParallelTask t[] = runningParallelTasks = new ParallelTask[repetitions * tasks.size()]; + ParallelTask[] t = runningParallelTasks = new ParallelTask[repetitions * tasks.size()]; // Get number of parallel threads from algo file and 
set it to use in ReuersContentSource.java's // docCountArrInit() this.getRunData().getConfig().setNumThreads(t.length); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java index 5eafb553fcf..041f5e43838 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java @@ -141,7 +141,7 @@ public class Config { * @return a string property. */ public String get(String name, String dflt) { - String vals[] = (String[]) valByRound.get(name); + String[] vals = (String[]) valByRound.get(name); if (vals != null) { return vals[roundNumber % vals.length]; } @@ -191,7 +191,7 @@ public class Config { */ public int get(String name, int dflt) { // use value by round if already parsed - int vals[] = (int[]) valByRound.get(name); + int[] vals = (int[]) valByRound.get(name); if (vals != null) { return vals[roundNumber % vals.length]; } @@ -221,7 +221,7 @@ public class Config { */ public double get(String name, double dflt) { // use value by round if already parsed - double vals[] = (double[]) valByRound.get(name); + double[] vals = (double[]) valByRound.get(name); if (vals != null) { return vals[roundNumber % vals.length]; } @@ -251,7 +251,7 @@ public class Config { */ public boolean get(String name, boolean dflt) { // use value by round if already parsed - boolean vals[] = (boolean[]) valByRound.get(name); + boolean[] vals = (boolean[]) valByRound.get(name); if (vals != null) { return vals[roundNumber % vals.length]; } @@ -288,22 +288,22 @@ public class Config { final String name = entry.getKey(); Object a = entry.getValue(); if (a instanceof int[]) { - int ai[] = (int[]) a; + int[] ai = (int[]) a; int n1 = (roundNumber - 1) % ai.length; int n2 = roundNumber % ai.length; sb.append(" ").append(name).append(":").append(ai[n1]).append("-->").append(ai[n2]); } else if (a instanceof double[]) { - double ad[] = (double[]) a; + double[] ad = (double[]) a; int n1 = (roundNumber - 1) % ad.length; int n2 = roundNumber % ad.length; sb.append(" ").append(name).append(":").append(ad[n1]).append("-->").append(ad[n2]); } else if (a instanceof String[]) { - String ad[] = (String[]) a; + String[] ad = (String[]) a; int n1 = (roundNumber - 1) % ad.length; int n2 = roundNumber % ad.length; sb.append(" ").append(name).append(":").append(ad[n1]).append("-->").append(ad[n2]); } else { - boolean ab[] = (boolean[]) a; + boolean[] ab = (boolean[]) a; int n1 = (roundNumber - 1) % ab.length; int n2 = roundNumber % ab.length; sb.append(" ").append(name).append(":").append(ab[n1]).append("-->").append(ab[n2]); @@ -344,7 +344,7 @@ public class Config { String t = st.nextToken(); a.add(Integer.valueOf(t)); } - int res[] = new int[a.size()]; + int[] res = new int[a.size()]; for (int i = 0; i < a.size(); i++) { res[i] = a.get(i).intValue(); } @@ -363,7 +363,7 @@ public class Config { String t = st.nextToken(); a.add(Double.valueOf(t)); } - double res[] = new double[a.size()]; + double[] res = new double[a.size()]; for (int i = 0; i < a.size(); i++) { res[i] = a.get(i).doubleValue(); } @@ -382,7 +382,7 @@ public class Config { String t = st.nextToken(); a.add(Boolean.valueOf(t)); } - boolean res[] = new boolean[a.size()]; + boolean[] res = new boolean[a.size()]; for (int i = 0; i < a.size(); i++) { res[i] = a.get(i).booleanValue(); } @@ -419,19 +419,19 @@ public class Config { // append actual values, for that 
round Object a = valByRound.get(valByRoundName); if (a instanceof int[]) { - int ai[] = (int[]) a; + int[] ai = (int[]) a; int n = roundNum % ai.length; sb.append(Format.format(ai[n], template)); } else if (a instanceof double[]) { - double ad[] = (double[]) a; + double[] ad = (double[]) a; int n = roundNum % ad.length; sb.append(Format.format(2, ad[n], template)); } else if (a instanceof String[]) { - String ad[] = (String[]) a; + String[] ad = (String[]) a; int n = roundNum % ad.length; sb.append(ad[n]); } else { - boolean ab[] = (boolean[]) a; + boolean[] ab = (boolean[]) a; int n = roundNum % ab.length; sb.append(Format.formatPaddLeft("" + ab[n], template)); } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Format.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Format.java index bc2750574d6..e3e5f8e7afd 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Format.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Format.java @@ -22,7 +22,7 @@ import java.util.Locale; /** Formatting utilities (for reports). */ public class Format { - private static NumberFormat numFormat[] = { + private static NumberFormat[] numFormat = { NumberFormat.getInstance(Locale.ROOT), NumberFormat.getInstance(Locale.ROOT), NumberFormat.getInstance(Locale.ROOT), diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/Judge.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/Judge.java index adafa85e2d5..6b35c5c5701 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/Judge.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/Judge.java @@ -40,7 +40,7 @@ public interface Judge { * @param logger if not null, validation issues are logged. * @return true if perfectly valid, false if not. */ - public boolean validateData(QualityQuery qq[], PrintWriter logger); + public boolean validateData(QualityQuery[] qq, PrintWriter logger); /** * Return the maximal recall for the input quality query. It is the number of relevant docs this diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java index 25ccb9bc32d..0b3eacb4484 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java @@ -42,7 +42,7 @@ import org.apache.lucene.search.TopDocs; public class QualityBenchmark { /** Quality Queries that this quality benchmark would execute. */ - protected QualityQuery qualityQueries[]; + protected QualityQuery[] qualityQueries; /** Parser for turning QualityQueries into Lucene Queries. */ protected QualityQueryParser qqParser; @@ -72,7 +72,7 @@ public class QualityBenchmark { * name for search results, and is important for judging the results. 
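* A typical end-to-end run, mirroring {@code QueryDriver} later in this patch:
*
*   QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
*   qrun.setMaxResults(maxResults);
*   QualityStats[] stats = qrun.execute(judge, submitLog, logger);
*   QualityStats avg = QualityStats.average(stats);   // summary averaged over all executed queries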
*/ public QualityBenchmark( - QualityQuery qqs[], + QualityQuery[] qqs, QualityQueryParser qqParser, IndexSearcher searcher, String docNameField) { @@ -95,7 +95,7 @@ public class QualityBenchmark { public QualityStats[] execute(Judge judge, SubmissionReport submitRep, PrintWriter qualityLog) throws Exception { int nQueries = Math.min(maxQueries, qualityQueries.length); - QualityStats stats[] = new QualityStats[nQueries]; + QualityStats[] stats = new QualityStats[nQueries]; for (int i = 0; i < nQueries; i++) { QualityQuery qq = qualityQueries[i]; // generate query @@ -123,7 +123,7 @@ public class QualityBenchmark { QualityQuery qq, Query q, TopDocs td, Judge judge, PrintWriter logger, long searchTime) throws IOException { QualityStats stts = new QualityStats(judge.maxRecall(qq), searchTime); - ScoreDoc sd[] = td.scoreDocs; + ScoreDoc[] sd = td.scoreDocs; long t1 = System.currentTimeMillis(); // extraction of first doc name we measure also construction of // doc name extractor, just in case. diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java index 9f7643b7c9d..bda1e55779f 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java @@ -29,7 +29,7 @@ public class QualityStats { private double maxGoodPoints; private double recall; - private double pAt[]; + private double[] pAt; private double pReleventSum = 0; private double numPoints = 0; private double numGoodPoints = 0; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java index 052561807ad..05354c12da8 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java @@ -70,7 +70,7 @@ public class QueryDriver { // use trec utilities to read trec topics into quality queries TrecTopicsReader qReader = new TrecTopicsReader(); - QualityQuery qqs[] = + QualityQuery[] qqs = qReader.readQueries(Files.newBufferedReader(topicsFile, StandardCharsets.UTF_8)); // prepare judge, with trec utilities that read from a QRels file @@ -90,7 +90,7 @@ public class QueryDriver { // run the benchmark QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField); qrun.setMaxResults(maxResults); - QualityStats stats[] = qrun.execute(judge, submitLog, logger); + QualityStats[] stats = qrun.execute(judge, submitLog, logger); // print an avarage sum of the results QualityStats avg = QualityStats.average(stats); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java index 97dd130ca18..4334f7aa8fb 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java @@ -82,7 +82,7 @@ public class Trec1MQReader { reader.close(); } // sort result array (by ID) - QualityQuery qq[] = res.toArray(new QualityQuery[0]); + QualityQuery[] qq = res.toArray(new QualityQuery[0]); Arrays.sort(qq); return qq; } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecTopicsReader.java 
b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecTopicsReader.java index 5287f780692..3557d3e2e33 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecTopicsReader.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecTopicsReader.java @@ -104,7 +104,7 @@ public class TrecTopicsReader { reader.close(); } // sort result array (by ID) - QualityQuery qq[] = res.toArray(new QualityQuery[0]); + QualityQuery[] qq = res.toArray(new QualityQuery[0]); Arrays.sort(qq); return qq; } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java index 21f87e5f497..8de01bf74b1 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java @@ -55,17 +55,17 @@ public class QualityQueriesFinder { System.exit(1); } QualityQueriesFinder qqf = new QualityQueriesFinder(FSDirectory.open(Paths.get(args[0]))); - String q[] = qqf.bestQueries("body", 20); + String[] q = qqf.bestQueries("body", 20); for (int i = 0; i < q.length; i++) { System.out.println(newline + formatQueryAsTrecTopic(i, q[i], null, null)); } } private String[] bestQueries(String field, int numQueries) throws IOException { - String words[] = bestTerms("body", 4 * numQueries); + String[] words = bestTerms("body", 4 * numQueries); int n = words.length; int m = n / 4; - String res[] = new String[m]; + String[] res = new String[m]; for (int i = 0; i < res.length; i++) { res[i] = words[i] + " " + words[m + i] + " " + words[n - 1 - m - i] + " " + words[n - 1 - i]; // System.out.println("query["+i+"]: "+res[i]); @@ -117,7 +117,7 @@ public class QualityQueriesFinder { } finally { ir.close(); } - String res[] = new String[pq.size()]; + String[] res = new String[pq.size()]; int i = 0; while (pq.size() > 0) { TermDf tdf = pq.pop(); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java index 3cc3abf613f..dc8fc48bfe8 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java @@ -32,7 +32,7 @@ import org.apache.lucene.search.Query; */ public class SimpleQQParser implements QualityQueryParser { - private String qqNames[]; + private String[] qqNames; private String indexField; ThreadLocal<QueryParser> queryParser = new ThreadLocal<>(); @@ -42,7 +42,7 @@ public class SimpleQQParser implements QualityQueryParser { * @param qqNames name-value pairs of quality query to use for creating the query * @param indexField corresponding index field */ - public SimpleQQParser(String qqNames[], String indexField) { + public SimpleQQParser(String[] qqNames, String indexField) { this.qqNames = qqNames; this.indexField = indexField; } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java index 2364d6fc041..cbde9e9e2fe 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java @@ -63,7
+63,7 @@ public class SubmissionReport { if (logger == null) { return; } - ScoreDoc sd[] = td.scoreDocs; + ScoreDoc[] sd = td.scoreDocs; String sep = " \t "; DocNameExtractor xt = new DocNameExtractor(docNameField); for (int i = 0; i < sd.length; i++) { diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java index fba86f8d9fe..f8287fbfa5a 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java @@ -75,7 +75,7 @@ public abstract class BenchmarkTestCase extends LuceneTestCase { } // properties in effect in all tests here - final String propLines[] = { + final String[] propLines = { "work.dir=" + getWorkDirPath(), "directory=ByteBuffersDirectory", "print.props=false", }; diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java index 4a62d2276ab..eae50b71da2 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java @@ -71,7 +71,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test index creation logic */ public void testIndexAndSearchTasks() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "ResetSystemErase", "CreateIndex", "{ AddDoc } : 1000", @@ -110,7 +110,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test timed sequence task. */ public void testTimedSearchTask() throws Exception { - String algLines[] = { + String[] algLines = { "log.step=100000", "ResetSystemErase", "CreateIndex", @@ -132,7 +132,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { // disabled until we fix BG thread prio -- this test // causes build to hang public void testBGSearchTaskThreads() throws Exception { - String algLines[] = { + String[] algLines = { "log.time.step.msec = 100", "log.step=100000", "ResetSystemErase", @@ -162,7 +162,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test Exhasting Doc Maker logic */ public void testExhaustContentSource() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.SingleDocSource", "content.source.log.step=1", @@ -210,7 +210,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { // LUCENE-1994: test thread safety of SortableSingleDocMaker public void testDocMakerThreadSafety() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.SortableSingleDocSource", "doc.term.vector=false", @@ -246,7 +246,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test Parallel Doc Maker logic (for LUCENE-940) */ public void testParallelDocMaker() throws Exception { // 1. 
alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -280,7 +280,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { final int NUM_TRY_DOCS = 50; // Creates a line file with first 50 docs from SingleDocSource - String algLines1[] = { + String[] algLines1 = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.SingleDocSource", "content.source.forever=true", @@ -308,7 +308,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { numLines); // Index the line docs - String algLines2[] = { + String[] algLines2 = { "# ----- properties ", "analyzer=org.apache.lucene.analysis.core.WhitespaceAnalyzer", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", @@ -349,7 +349,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { // Read tokens from first NUM_DOCS docs from Reuters and // then build index from the same docs - String algLines1[] = { + String[] algLines1 = { "# ----- properties ", "analyzer=org.apache.lucene.analysis.core.WhitespaceAnalyzer", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", @@ -411,7 +411,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that " {[AddDoc(4000)]: 4} : * " works corrcetly (for LUCENE-941) */ public void testParallelExhausted() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -443,7 +443,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that exhaust in loop works as expected (LUCENE-1115). */ public void testExhaustedLooped() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -476,7 +476,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that we can close IndexWriter with argument "false". */ public void testCloseIndexFalse() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -520,7 +520,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that we can set merge scheduler". */ public void testMergeScheduler() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -566,7 +566,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that we can set merge policy". */ public void testMergePolicy() throws Exception { // 1. 
alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -606,7 +606,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that IndexWriter settings stick. */ public void testIndexWriterSettings() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -650,7 +650,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test indexing with facets tasks. */ public void testIndexingWithFacets() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -686,7 +686,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { /** Test that we can call forceMerge(maxNumSegments). */ public void testForceMerge() throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -732,7 +732,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { private void doTestDisableCounting(boolean disable) throws Exception { // 1. alg definition (required in every "logic" test) - String algLines[] = disableCountingLines(disable); + String[] algLines = disableCountingLines(disable); // 2. 
execute the algorithm (required in every "logic" test) Benchmark benchmark = execBenchmark(algLines); @@ -804,7 +804,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { } private String[] getLocaleConfig(String localeParam) { - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -865,7 +865,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { } private String[] getCollatorConfig(String localeParam, String collationParam) { - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), @@ -953,7 +953,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { private String[] getAnalyzerFactoryConfig(String name, String params) { final String singleQuoteEscapedName = name.replaceAll("'", "\\\\'"); - String algLines[] = { + String[] algLines = { "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "docs.file=" + getReuters20LinesFile(), "work.dir=" diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java index 152dd570a3b..25138156637 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java @@ -62,7 +62,7 @@ public class TestQualityRun extends BenchmarkTestCase { // prepare topics InputStream topics = getClass().getResourceAsStream("trecTopics.txt"); TrecTopicsReader qReader = new TrecTopicsReader(); - QualityQuery qqs[] = + QualityQuery[] qqs = qReader.readQueries( new BufferedReader(new InputStreamReader(topics, StandardCharsets.UTF_8))); @@ -83,7 +83,7 @@ public class TestQualityRun extends BenchmarkTestCase { SubmissionReport submitLog = VERBOSE ? new SubmissionReport(logger, "TestRun") : null; qrun.setMaxResults(maxResults); - QualityStats stats[] = qrun.execute(judge, submitLog, logger); + QualityStats[] stats = qrun.execute(judge, submitLog, logger); // --------- verify by the way judgments were altered for this test: // for some queries, depending on m = qnum % 8 @@ -160,7 +160,7 @@ public class TestQualityRun extends BenchmarkTestCase { // prepare topics InputStream topicsFile = getClass().getResourceAsStream("trecTopics.txt"); TrecTopicsReader qReader = new TrecTopicsReader(); - QualityQuery qqs[] = + QualityQuery[] qqs = qReader.readQueries( new BufferedReader(new InputStreamReader(topicsFile, StandardCharsets.UTF_8))); @@ -188,7 +188,7 @@ public class TestQualityRun extends BenchmarkTestCase { // use benchmark logic to create the mini Reuters index private void createReutersIndex() throws Exception { // 1. 
alg definition - String algLines[] = { + String[] algLines = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource", "analyzer=org.apache.lucene.analysis.classic.ClassicAnalyzer", diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/FuzzySet.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/FuzzySet.java index b7b9e0f157b..5ccb04203da 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/FuzzySet.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/FuzzySet.java @@ -79,7 +79,7 @@ public class FuzzySet implements Accountable { // a large bitset and then mapped to a smaller set can be looked up using a single // AND operation of the query term's hash rather than needing to perform a 2-step // translation of the query term that mirrors the stored content's reprojections. - static final int usableBitSetSizes[]; + static final int[] usableBitSetSizes; static { usableBitSetSizes = new int[30]; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java index 0cda6a6ce5c..bfb5888a56b 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java @@ -80,9 +80,9 @@ public class SimpleTextCompoundFormat extends CompoundFormat { assert StringHelper.startsWith(scratch.get(), TABLE); int numEntries = Integer.parseInt(stripPrefix(scratch, TABLE)); - final String fileNames[] = new String[numEntries]; - final long startOffsets[] = new long[numEntries]; - final long endOffsets[] = new long[numEntries]; + final String[] fileNames = new String[numEntries]; + final long[] startOffsets = new long[numEntries]; + final long[] endOffsets = new long[numEntries]; for (int i = 0; i < numEntries; i++) { SimpleTextUtil.readLine(in, scratch); @@ -160,10 +160,10 @@ public class SimpleTextCompoundFormat extends CompoundFormat { String dataFile = IndexFileNames.segmentFileName(si.name, "", DATA_EXTENSION); int numFiles = si.files().size(); - String names[] = si.files().toArray(new String[numFiles]); + String[] names = si.files().toArray(new String[numFiles]); Arrays.sort(names); - long startOffsets[] = new long[numFiles]; - long endOffsets[] = new long[numFiles]; + long[] startOffsets = new long[numFiles]; + long[] endOffsets = new long[numFiles]; BytesRefBuilder scratch = new BytesRefBuilder(); @@ -233,7 +233,7 @@ public class SimpleTextCompoundFormat extends CompoundFormat { static { int numDigits = Long.toString(Long.MAX_VALUE).length(); - char pattern[] = new char[numDigits]; + char[] pattern = new char[numDigits]; Arrays.fill(pattern, '0'); OFFSETPATTERN = new String(pattern); } diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java index 471a3f6d7b9..a9b0fd5943e 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java @@ -419,7 +419,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer { throw new CorruptIndexException("failed to parse int length", in, pe); } // skip past bytes - byte bytes[] = new byte[len]; + byte[] bytes = new byte[len]; in.readBytes(bytes, 0, len); 
SimpleTextUtil.readLine(in, scratch); // newline SimpleTextUtil.readLine(in, scratch); // 'T' or 'F' @@ -453,7 +453,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer { throw new CorruptIndexException("failed to parse int length", in, pe); } // skip past bytes - byte bytes[] = new byte[len]; + byte[] bytes = new byte[len]; in.readBytes(bytes, 0, len); SimpleTextUtil.readLine(in, scratch); // newline SimpleTextUtil.readLine(in, scratch); // 'T' or 'F' @@ -616,7 +616,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer { return false; } - long values[]; + long[] values; int index; private void setCurrentDoc() throws IOException { @@ -627,7 +627,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer { if (csv.length() == 0) { values = new long[0]; } else { - String s[] = csv.split(","); + String[] s = csv.split(","); values = new long[s.length]; for (int i = 0; i < values.length; i++) { values[i] = Long.parseLong(s[i]); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java index 3a02b476ca0..3381af7c20a 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java @@ -86,7 +86,7 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat { SimpleTextUtil.readLine(input, scratch); assert StringHelper.startsWith(scratch.get(), NUMFIELDS); final int size = Integer.parseInt(readString(NUMFIELDS.length, scratch)); - FieldInfo infos[] = new FieldInfo[size]; + FieldInfo[] infos = new FieldInfo[size]; for (int i = 0; i < size; i++) { SimpleTextUtil.readLine(input, scratch); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java index 60b9cdabcd8..f63f88a9639 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java @@ -60,7 +60,7 @@ import org.apache.lucene.util.StringHelper; */ public class SimpleTextStoredFieldsReader extends StoredFieldsReader { - private long offsets[]; /* docid -> offset in .fld file */ + private long[] offsets; /* docid -> offset in .fld file */ private IndexInput in; private BytesRefBuilder scratch = new BytesRefBuilder(); private CharsRefBuilder scratchUTF16 = new CharsRefBuilder(); @@ -91,7 +91,7 @@ public class SimpleTextStoredFieldsReader extends StoredFieldsReader { } // used by clone - SimpleTextStoredFieldsReader(long offsets[], IndexInput in, FieldInfos fieldInfos) { + SimpleTextStoredFieldsReader(long[] offsets, IndexInput in, FieldInfos fieldInfos) { this.offsets = offsets; this.in = in; this.fieldInfos = fieldInfos; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java index a9a7e73a643..a107096d4bb 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java @@ -56,7 +56,7 @@ import org.apache.lucene.util.StringHelper; */ public class SimpleTextTermVectorsReader extends 
TermVectorsReader { - private long offsets[]; /* docid -> offset in .vec file */ + private long[] offsets; /* docid -> offset in .vec file */ private IndexInput in; private BytesRefBuilder scratch = new BytesRefBuilder(); private CharsRefBuilder scratchUTF16 = new CharsRefBuilder(); @@ -83,7 +83,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader { } // used by clone - SimpleTextTermVectorsReader(long offsets[], IndexInput in) { + SimpleTextTermVectorsReader(long[] offsets, IndexInput in) { this.offsets = offsets; this.in = in; } @@ -185,7 +185,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader { if (scratch.length() - PAYLOAD.length == 0) { postings.payloads[k] = null; } else { - byte payloadBytes[] = new byte[scratch.length() - PAYLOAD.length]; + byte[] payloadBytes = new byte[scratch.length() - PAYLOAD.length]; System.arraycopy( scratch.bytes(), PAYLOAD.length, payloadBytes, 0, payloadBytes.length); postings.payloads[k] = new BytesRef(payloadBytes); @@ -332,10 +332,10 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader { private static class SimpleTVPostings { private int freq; - private int positions[]; - private int startOffsets[]; - private int endOffsets[]; - private BytesRef payloads[]; + private int[] positions; + private int[] startOffsets; + private int[] endOffsets; + private BytesRef[] payloads; } private static class SimpleTVTermsEnum extends BaseTermsEnum { @@ -525,7 +525,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader { return slowAdvance(target); } - public void reset(int[] positions, int[] startOffsets, int[] endOffsets, BytesRef payloads[]) { + public void reset(int[] positions, int[] startOffsets, int[] endOffsets, BytesRef[] payloads) { this.positions = positions; this.startOffsets = startOffsets; this.endOffsets = endOffsets; diff --git a/lucene/core/src/generated/checksums/generateStandardTokenizer.json b/lucene/core/src/generated/checksums/generateStandardTokenizer.json index 87e54361589..313c014c066 100644 --- a/lucene/core/src/generated/checksums/generateStandardTokenizer.json +++ b/lucene/core/src/generated/checksums/generateStandardTokenizer.json @@ -1,6 +1,6 @@ { - "gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "68263ff0a014904c6e89b040d868d8f399408908", + "gradle/generation/jflex/skeleton.disable.buffer.expansion.txt": "1424f4df33c977bb150d7377c3bd61f819113091", "lucene/core/src/data/jflex/UnicodeEmojiProperties.jflex": "7491dd535debc6e9e9ce367c4d3a7217e466dcae", - "lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java": "8e33c2698446c1c7a9479796a41316d1932ceda9", + "lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java": "47efe8b0d39974f41707b415668136d4c0cf425b", "lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex": "6158aeb8dd11cd9100623608b2dcce51b2df9d0b" } \ No newline at end of file diff --git a/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java b/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java index b8b7506a757..1c67cc9941b 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java @@ -141,7 +141,7 @@ public class WordlistLoader { while ((line = br.readLine()) != null) { int comment = line.indexOf('|'); if (comment >= 0) line = line.substring(0, comment); - String words[] = line.split("\\s+"); + String[] words = 
line.split("\\s+"); for (int i = 0; i < words.length; i++) { if (words[i].length() > 0) result.add(words[i]); } diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java index 621c2f0a026..3f17c1fa2f4 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java @@ -463,7 +463,7 @@ public final class StandardTokenizerImpl { private static final int ZZ_PUSHBACK_2BIG = 2; /* error messages for the codes above */ - private static final String ZZ_ERROR_MSG[] = { + private static final String[] ZZ_ERROR_MSG = { "Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large" @@ -509,7 +509,7 @@ public final class StandardTokenizerImpl { /** this buffer contains the current text to be matched and is the source of the yytext() string */ - private char zzBuffer[] = new char[ZZ_BUFFERSIZE]; + private char[] zzBuffer = new char[ZZ_BUFFERSIZE]; /** the textposition at the last accepting state */ private int zzMarkedPos; diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java index 921ffdb16b1..cb4fac454c8 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java @@ -360,7 +360,7 @@ public final class CodecUtil { /** Expert: just reads and verifies the object ID of an index header */ public static byte[] checkIndexHeaderID(DataInput in, byte[] expectedID) throws IOException { - byte id[] = new byte[StringHelper.ID_LENGTH]; + byte[] id = new byte[StringHelper.ID_LENGTH]; in.readBytes(id, 0, id.length); if (!Arrays.equals(id, expectedID)) { throw new CorruptIndexException( @@ -377,7 +377,7 @@ public final class CodecUtil { public static String checkIndexHeaderSuffix(DataInput in, String expectedSuffix) throws IOException { int suffixLength = in.readByte() & 0xFF; - byte suffixBytes[] = new byte[suffixLength]; + byte[] suffixBytes = new byte[suffixLength]; in.readBytes(suffixBytes, 0, suffixBytes.length); String suffix = new String(suffixBytes, 0, suffixBytes.length, StandardCharsets.UTF_8); if (!suffix.equals(expectedSuffix)) { diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java index 6320f97c100..6d962be51e0 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java @@ -608,10 +608,10 @@ public abstract class DocValuesConsumer implements Closeable { } final int numReaders = toMerge.size(); - final SortedDocValues dvs[] = toMerge.toArray(new SortedDocValues[numReaders]); + final SortedDocValues[] dvs = toMerge.toArray(new SortedDocValues[numReaders]); // step 1: iterate thru each sub and mark terms still in use - TermsEnum liveTerms[] = new TermsEnum[dvs.length]; + TermsEnum[] liveTerms = new TermsEnum[dvs.length]; long[] weights = new long[liveTerms.length]; for (int sub = 0; sub < numReaders; sub++) { SortedDocValues dv = dvs[sub]; @@ -793,7 +793,7 @@ public abstract class DocValuesConsumer implements Closeable { } // step 1: iterate thru each sub and mark terms still in use - TermsEnum liveTerms[] = new TermsEnum[toMerge.size()]; + TermsEnum[] liveTerms = new 
TermsEnum[toMerge.size()]; long[] weights = new long[liveTerms.length]; for (int sub = 0; sub < liveTerms.length; sub++) { SortedSetDocValues dv = toMerge.get(sub); diff --git a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java index 7f62ec20b5f..353fd378bc5 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java @@ -55,10 +55,10 @@ public abstract class MultiLevelSkipListReader implements Closeable { private IndexInput[] skipStream; /** The start pointer of each skip level. */ - private long skipPointer[]; + private long[] skipPointer; /** skipInterval of each level. */ - private int skipInterval[]; + private int[] skipInterval; /** * Number of docs skipped per level. It's possible for some values to overflow a signed int, but diff --git a/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java index 941b8ba75ce..f9352281db5 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java @@ -41,7 +41,7 @@ public abstract class SegmentInfoFormat { * @throws IOException If an I/O error occurs */ public abstract SegmentInfo read( - Directory directory, String segmentName, byte segmentID[], IOContext context) + Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException; /** diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java index 31ee09b7d4d..b4063f7ecb7 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java @@ -122,7 +122,7 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat { IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION); try (ChecksumIndexInput input = directory.openChecksumInput(fileName, context)) { Throwable priorE = null; - FieldInfo infos[] = null; + FieldInfo[] infos = null; try { CodecUtil.checkIndexHeader( input, diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90LiveDocsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90LiveDocsFormat.java index e5496a96928..5def7b5d657 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90LiveDocsFormat.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90LiveDocsFormat.java @@ -100,7 +100,7 @@ public final class Lucene90LiveDocsFormat extends LiveDocsFormat { } private FixedBitSet readFixedBitSet(IndexInput input, int length) throws IOException { - long data[] = new long[FixedBitSet.bits2words(length)]; + long[] data = new long[FixedBitSet.bits2words(length)]; for (int i = 0; i < data.length; i++) { data[i] = input.readLong(); } diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90SkipReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90SkipReader.java index e05f5ce1f8a..da31bd75a80 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90SkipReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90SkipReader.java @@ -49,11 
+49,11 @@ import org.apache.lucene.store.IndexInput; *
    Therefore, we'll trim df before passing it to the interface. see trim(int) */ class Lucene90SkipReader extends MultiLevelSkipListReader { - private long docPointer[]; - private long posPointer[]; - private long payPointer[]; - private int posBufferUpto[]; - private int payloadByteUpto[]; + private long[] docPointer; + private long[] posPointer; + private long[] payPointer; + private int[] posBufferUpto; + private int[] payloadByteUpto; private long lastPosPointer; private long lastPayPointer; diff --git a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java index b20638d69cf..e9848f1bbdd 100644 --- a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java @@ -172,12 +172,12 @@ public final class DoublePoint extends Field { // public helper methods (e.g. for queries) /** Encode single double dimension */ - public static void encodeDimension(double value, byte dest[], int offset) { + public static void encodeDimension(double value, byte[] dest, int offset) { NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(value), dest, offset); } /** Decode single double dimension */ - public static double decodeDimension(byte value[], int offset) { + public static double decodeDimension(byte[] value, int offset) { return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(value, offset)); } diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java index bc8feb78c75..c3063f25c00 100644 --- a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java @@ -172,12 +172,12 @@ public final class FloatPoint extends Field { // public helper methods (e.g. for queries) /** Encode single float dimension */ - public static void encodeDimension(float value, byte dest[], int offset) { + public static void encodeDimension(float value, byte[] dest, int offset) { NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(value), dest, offset); } /** Decode single float dimension */ - public static float decodeDimension(byte value[], int offset) { + public static float decodeDimension(byte[] value, int offset) { return NumericUtils.sortableIntToFloat(NumericUtils.sortableBytesToInt(value, offset)); } diff --git a/lucene/core/src/java/org/apache/lucene/document/InetAddressPoint.java b/lucene/core/src/java/org/apache/lucene/document/InetAddressPoint.java index 8a9cc72b2e2..a304dcf8bf5 100644 --- a/lucene/core/src/java/org/apache/lucene/document/InetAddressPoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/InetAddressPoint.java @@ -182,7 +182,7 @@ public class InetAddressPoint extends Field { } /** Decodes InetAddress value from binary encoding */ - public static InetAddress decode(byte value[]) { + public static InetAddress decode(byte[] value) { try { return InetAddress.getByAddress(value); } catch (UnknownHostException e) { @@ -227,8 +227,8 @@ public class InetAddressPoint extends Field { } // create the lower value by zeroing out the host portion, upper value by filling it with all // ones. 
- byte lower[] = value.getAddress(); - byte upper[] = value.getAddress(); + byte[] lower = value.getAddress(); + byte[] upper = value.getAddress(); for (int i = prefixLength; i < 8 * lower.length; i++) { int m = 1 << (7 - (i & 7)); lower[i >> 3] &= ~m; diff --git a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java index dfde582d361..26c443cd8f9 100644 --- a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java @@ -148,12 +148,12 @@ public final class IntPoint extends Field { // public helper methods (e.g. for queries) /** Encode single integer dimension */ - public static void encodeDimension(int value, byte dest[], int offset) { + public static void encodeDimension(int value, byte[] dest, int offset) { NumericUtils.intToSortableBytes(value, dest, offset); } /** Decode single integer dimension */ - public static int decodeDimension(byte value[], int offset) { + public static int decodeDimension(byte[] value, int offset) { return NumericUtils.sortableBytesToInt(value, offset); } diff --git a/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java b/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java index f2d59409b6e..21eb8fb541d 100644 --- a/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java @@ -135,7 +135,7 @@ public class LatLonPoint extends Field { result.append(name); result.append(':'); - byte bytes[] = ((BytesRef) fieldsData).bytes; + byte[] bytes = ((BytesRef) fieldsData).bytes; result.append(decodeLatitude(bytes, 0)); result.append(','); result.append(decodeLongitude(bytes, Integer.BYTES)); diff --git a/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceFeatureQuery.java b/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceFeatureQuery.java index 5cf75e2ace0..8508cb13761 100644 --- a/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceFeatureQuery.java +++ b/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceFeatureQuery.java @@ -387,10 +387,10 @@ final class LatLonPointDistanceFeatureQuery extends Query { // Ideally we would be doing a distance query but that is too expensive so we approximate // with a box query which performs better. 
Rectangle box = Rectangle.fromPointDistance(originLat, originLon, maxDistance); - final byte minLat[] = new byte[LatLonPoint.BYTES]; - final byte maxLat[] = new byte[LatLonPoint.BYTES]; - final byte minLon[] = new byte[LatLonPoint.BYTES]; - final byte maxLon[] = new byte[LatLonPoint.BYTES]; + final byte[] minLat = new byte[LatLonPoint.BYTES]; + final byte[] maxLat = new byte[LatLonPoint.BYTES]; + final byte[] minLon = new byte[LatLonPoint.BYTES]; + final byte[] maxLon = new byte[LatLonPoint.BYTES]; final boolean crossDateLine = box.crossesDateline(); NumericUtils.intToSortableBytes(GeoEncodingUtils.encodeLatitude(box.minLat), minLat, 0); diff --git a/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceQuery.java b/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceQuery.java index 3307816a34c..a8badc9dced 100644 --- a/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceQuery.java +++ b/lucene/core/src/java/org/apache/lucene/document/LatLonPointDistanceQuery.java @@ -83,12 +83,12 @@ final class LatLonPointDistanceQuery extends Query { Rectangle box = Rectangle.fromPointDistance(latitude, longitude, radiusMeters); // create bounding box(es) for the distance range // these are pre-encoded with LatLonPoint's encoding - final byte minLat[] = new byte[Integer.BYTES]; - final byte maxLat[] = new byte[Integer.BYTES]; - final byte minLon[] = new byte[Integer.BYTES]; - final byte maxLon[] = new byte[Integer.BYTES]; + final byte[] minLat = new byte[Integer.BYTES]; + final byte[] maxLat = new byte[Integer.BYTES]; + final byte[] minLon = new byte[Integer.BYTES]; + final byte[] maxLon = new byte[Integer.BYTES]; // second set of longitude ranges to check (for cross-dateline case) - final byte minLon2[] = new byte[Integer.BYTES]; + final byte[] minLon2 = new byte[Integer.BYTES]; NumericUtils.intToSortableBytes(encodeLatitude(box.minLat), minLat, 0); NumericUtils.intToSortableBytes(encodeLatitude(box.maxLat), maxLat, 0); diff --git a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java index 3224c6adfac..ae9438697bd 100644 --- a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java +++ b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java @@ -151,12 +151,12 @@ public final class LongPoint extends Field { // public helper methods (e.g. 
for queries) /** Encode single long dimension */ - public static void encodeDimension(long value, byte dest[], int offset) { + public static void encodeDimension(long value, byte[] dest, int offset) { NumericUtils.longToSortableBytes(value, dest, offset); } /** Decode single long dimension */ - public static long decodeDimension(byte value[], int offset) { + public static long decodeDimension(byte[] value, int offset) { return NumericUtils.sortableBytesToLong(value, offset); } diff --git a/lucene/core/src/java/org/apache/lucene/document/XYPointField.java b/lucene/core/src/java/org/apache/lucene/document/XYPointField.java index f806bc43413..545125de1c8 100644 --- a/lucene/core/src/java/org/apache/lucene/document/XYPointField.java +++ b/lucene/core/src/java/org/apache/lucene/document/XYPointField.java @@ -107,7 +107,7 @@ public class XYPointField extends Field { result.append(name); result.append(':'); - byte bytes[] = ((BytesRef) fieldsData).bytes; + byte[] bytes = ((BytesRef) fieldsData).bytes; result.append(XYEncodingUtils.decode(bytes, 0)); result.append(','); result.append(XYEncodingUtils.decode(bytes, Integer.BYTES)); diff --git a/lucene/core/src/java/org/apache/lucene/geo/EdgeTree.java b/lucene/core/src/java/org/apache/lucene/geo/EdgeTree.java index 2c0fac717ec..84e405499d8 100644 --- a/lucene/core/src/java/org/apache/lucene/geo/EdgeTree.java +++ b/lucene/core/src/java/org/apache/lucene/geo/EdgeTree.java @@ -334,7 +334,7 @@ final class EdgeTree { * @return root node of the tree. */ static EdgeTree createTree(double[] x, double[] y) { - EdgeTree edges[] = new EdgeTree[x.length - 1]; + EdgeTree[] edges = new EdgeTree[x.length - 1]; for (int i = 1; i < x.length; i++) { double x1 = x[i - 1]; double y1 = y[i - 1]; @@ -356,7 +356,7 @@ final class EdgeTree { } /** Creates tree from sorted edges (with range low and high inclusive) */ - private static EdgeTree createTree(EdgeTree edges[], int low, int high) { + private static EdgeTree createTree(EdgeTree[] edges, int low, int high) { if (low > high) { return null; } diff --git a/lucene/core/src/java/org/apache/lucene/geo/Polygon2D.java b/lucene/core/src/java/org/apache/lucene/geo/Polygon2D.java index 96e3d822414..4294f5e8540 100644 --- a/lucene/core/src/java/org/apache/lucene/geo/Polygon2D.java +++ b/lucene/core/src/java/org/apache/lucene/geo/Polygon2D.java @@ -359,7 +359,7 @@ final class Polygon2D implements Component2D { /** Builds a Polygon2D from LatLon polygon */ static Component2D create(Polygon polygon) { - Polygon gonHoles[] = polygon.getHoles(); + Polygon[] gonHoles = polygon.getHoles(); Component2D holes = null; if (gonHoles.length > 0) { holes = LatLonGeometry.create(gonHoles); @@ -369,7 +369,7 @@ final class Polygon2D implements Component2D { /** Builds a Polygon2D from XY polygon */ static Component2D create(XYPolygon polygon) { - XYPolygon gonHoles[] = polygon.getHoles(); + XYPolygon[] gonHoles = polygon.getHoles(); Component2D holes = null; if (gonHoles.length > 0) { holes = XYGeometry.create(gonHoles); diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java index c58282a856b..116092c40c6 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java +++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java @@ -862,9 +862,9 @@ public final class CheckIndex implements Closeable { infoStream.print(" test: index sort.........."); } - SortField fields[] = sort.getSort(); - final int reverseMul[] = new int[fields.length]; - final 
LeafFieldComparator comparators[] = new LeafFieldComparator[fields.length]; + SortField[] fields = sort.getSort(); + final int[] reverseMul = new int[fields.length]; + final LeafFieldComparator[] comparators = new LeafFieldComparator[fields.length]; LeafReaderContext readerContext = new LeafReaderContext(reader); @@ -3648,7 +3648,7 @@ public final class CheckIndex implements Closeable { // actual main: returns exit code instead of terminating JVM (for easy testing) @SuppressForbidden(reason = "System.out required: command line tool") - private static int doMain(String args[]) throws IOException, InterruptedException { + private static int doMain(String[] args) throws IOException, InterruptedException { Options opts; try { opts = parseOptions(args); diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java index a738735829d..b05747b8755 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java +++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java @@ -251,11 +251,11 @@ final class FreqProxTermsWriterPerField extends TermsHashPerField { // writeOffsets); } - int termFreqs[]; // # times this term occurs in the current doc - int lastDocIDs[]; // Last docID where this term occurred - int lastDocCodes[]; // Code for prior doc - int lastPositions[]; // Last position where this term occurred - int lastOffsets[]; // Last endOffset where this term occurred + int[] termFreqs; // # times this term occurs in the current doc + int[] lastDocIDs; // Last docID where this term occurred + int[] lastDocCodes; // Code for prior doc + int[] lastPositions; // Last position where this term occurred + int[] lastOffsets; // Last endOffset where this term occurred @Override ParallelPostingsArray newInstance(int size) { diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java index 6771fa0f1af..c68a88aba09 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java +++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java @@ -85,7 +85,7 @@ final class FrozenBufferedUpdates { this.privateSegment = privateSegment; assert privateSegment == null || updates.deleteTerms.isEmpty() : "segment private packet should only have del queries"; - Term termsArray[] = updates.deleteTerms.keySet().toArray(new Term[updates.deleteTerms.size()]); + Term[] termsArray = updates.deleteTerms.keySet().toArray(new Term[updates.deleteTerms.size()]); ArrayUtil.timSort(termsArray); PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder(); for (Term term : termsArray) { diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java index 59bf0df0eb7..968900c7f0c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java @@ -145,7 +145,7 @@ public final class IndexFileNames { /** Returns the generation from this file name, or 0 if there is no generation. 
*/ public static long parseGeneration(String filename) { assert filename.startsWith("_"); - String parts[] = stripExtension(filename).substring(1).split("_"); + String[] parts = stripExtension(filename).substring(1).split("_"); // 4 cases: // segment.ext // segment_gen.ext diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/IndexingChain.java index 99b7217c5e7..6db54f3a2de 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexingChain.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexingChain.java @@ -531,7 +531,7 @@ final class IndexingChain implements Accountable { int newHashSize = (fieldHash.length * 2); assert newHashSize > fieldHash.length; - PerField newHashArray[] = new PerField[newHashSize]; + PerField[] newHashArray = new PerField[newHashSize]; // Rehash int newHashMask = newHashSize - 1; diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java index 46de8ad8d27..e9c01429dcd 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java @@ -50,7 +50,7 @@ public final class MultiFields extends Fields { @SuppressWarnings({"unchecked", "rawtypes"}) @Override public Iterator<String> iterator() { - Iterator<String> subIterators[] = new Iterator[subs.length]; + Iterator<String>[] subIterators = new Iterator[subs.length]; for (int i = 0; i < subs.length; i++) { subIterators[i] = subs[i].iterator(); } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiSorter.java b/lucene/core/src/java/org/apache/lucene/index/MultiSorter.java index 81e965598e8..cdff279b49c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiSorter.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiSorter.java @@ -39,7 +39,7 @@ final class MultiSorter { // TODO: optimize if only 1 reader is incoming, though that's a rare case - SortField fields[] = sort.getSort(); + SortField[] fields = sort.getSort(); final IndexSorter.ComparableProvider[][] comparables = new IndexSorter.ComparableProvider[fields.length][]; final int[] reverseMuls = new int[fields.length]; diff --git a/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java b/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java index ae92991b005..b69f0b08508 100644 --- a/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java +++ b/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java @@ -171,7 +171,7 @@ public class OrdinalMap implements Accountable { * @throws IOException if an I/O error occurred.
*/ public static OrdinalMap build( - IndexReader.CacheKey owner, TermsEnum subs[], long[] weights, float acceptableOverheadRatio) + IndexReader.CacheKey owner, TermsEnum[] subs, long[] weights, float acceptableOverheadRatio) throws IOException { if (subs.length != weights.length) { throw new IllegalArgumentException("subs and weights must have the same length"); @@ -195,7 +195,7 @@ public class OrdinalMap implements Accountable { // globalOrd -> first segment container final LongValues firstSegments; // for every segment, segmentOrd -> globalOrd - final LongValues segmentToGlobalOrds[]; + final LongValues[] segmentToGlobalOrds; // the map from/to segment ids final SegmentMap segmentMap; // ram usage @@ -203,7 +203,7 @@ public class OrdinalMap implements Accountable { OrdinalMap( IndexReader.CacheKey owner, - TermsEnum subs[], + TermsEnum[] subs, SegmentMap segmentMap, float acceptableOverheadRatio) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java index 3933bd5a51a..8650d96867d 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java @@ -325,7 +325,7 @@ public final class SegmentInfos implements Cloneable, Iterable>> chunkSizePower); // we always allocate one more slice, the last one may be a 0 byte one - final ByteBuffer slices[] = new ByteBuffer[endIndex - startIndex + 1]; + final ByteBuffer[] slices = new ByteBuffer[endIndex - startIndex + 1]; for (int i = 0; i < slices.length; i++) { slices[i] = buffers[startIndex + i].duplicate().order(ByteOrder.LITTLE_ENDIAN); diff --git a/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java b/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java index acadeefdf9b..161cc384046 100644 --- a/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java @@ -255,7 +255,7 @@ public class MMapDirectory extends FSDirectory { // we always allocate one more buffer, the last one may be a 0 byte one final int nrBuffers = (int) (length >>> chunkSizePower) + 1; - ByteBuffer buffers[] = new ByteBuffer[nrBuffers]; + ByteBuffer[] buffers = new ByteBuffer[nrBuffers]; long bufferStart = 0L; for (int bufNr = 0; bufNr < nrBuffers; bufNr++) { diff --git a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java index 2ede7531fae..4b8c14635b6 100644 --- a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java +++ b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java @@ -343,7 +343,7 @@ public final class ByteBlockPool implements Accountable { * *
    Note: this method allows to copy across block boundaries. */ - public void readBytes(final long offset, final byte bytes[], int bytesOffset, int bytesLength) { + public void readBytes(final long offset, final byte[] bytes, int bytesOffset, int bytesLength) { int bytesLeft = bytesLength; int bufferIndex = (int) (offset >> BYTE_BLOCK_SHIFT); int pos = (int) (offset & BYTE_BLOCK_MASK); diff --git a/lucene/core/src/java/org/apache/lucene/util/StringHelper.java b/lucene/core/src/java/org/apache/lucene/util/StringHelper.java index 4a6cf9b45e7..48734f25b46 100644 --- a/lucene/core/src/java/org/apache/lucene/util/StringHelper.java +++ b/lucene/core/src/java/org/apache/lucene/util/StringHelper.java @@ -314,7 +314,7 @@ public abstract class StringHelper { // what impact that has on the period, whereas the simple ++ (mod 2^128) // we use here is guaranteed to have the full period. - byte bits[]; + byte[] bits; synchronized (idLock) { bits = nextId.toByteArray(); nextId = nextId.add(BigInteger.ONE).and(mask128); @@ -339,7 +339,7 @@ public abstract class StringHelper { * representation for debugging. Never throws an exception. The returned string may indicate if * the id is definitely invalid. */ - public static String idToString(byte id[]) { + public static String idToString(byte[] id) { if (id == null) { return "(null)"; } else { diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java index 3604e47fc05..190a5d8ccd2 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java @@ -37,18 +37,18 @@ public class LevenshteinAutomata { */ public static final int MAXIMUM_SUPPORTED_DISTANCE = 2; /* input word */ - final int word[]; + final int[] word; /* the automata alphabet. */ - final int alphabet[]; + final int[] alphabet; /* the maximum symbol in the alphabet (e.g. 255 for UTF-8 or 10FFFF for UTF-32) */ final int alphaMax; /* the ranges outside of alphabet */ - final int rangeLower[]; - final int rangeUpper[]; + final int[] rangeLower; + final int[] rangeUpper; int numRanges = 0; - ParametricDescription descriptions[]; + ParametricDescription[] descriptions; /** * Create a new LevenshteinAutomata for some input String. 
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java
index 3604e47fc05..190a5d8ccd2 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java
@@ -37,18 +37,18 @@ public class LevenshteinAutomata {
    */
   public static final int MAXIMUM_SUPPORTED_DISTANCE = 2;
   /* input word */
-  final int word[];
+  final int[] word;
   /* the automata alphabet. */
-  final int alphabet[];
+  final int[] alphabet;
   /* the maximum symbol in the alphabet (e.g. 255 for UTF-8 or 10FFFF for UTF-32) */
   final int alphaMax;
 
   /* the ranges outside of alphabet */
-  final int rangeLower[];
-  final int rangeUpper[];
+  final int[] rangeLower;
+  final int[] rangeUpper;
   int numRanges = 0;
 
-  ParametricDescription descriptions[];
+  ParametricDescription[] descriptions;
 
   /**
    * Create a new LevenshteinAutomata for some input String. Optionally count transpositions as a
@@ -114,7 +114,7 @@ public class LevenshteinAutomata {
   private static int[] codePoints(String input) {
     int length = Character.codePointCount(input, 0, input.length());
-    int word[] = new int[length];
+    int[] word = new int[length];
     for (int i = 0, j = 0, cp = 0; i < input.length(); i += Character.charCount(cp)) {
       word[j++] = cp = input.codePointAt(i);
     }
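The codePoints(...) hunk above is the standard surrogate-aware conversion from a String to UTF-32: the index advances by Character.charCount(cp) chars per iteration, so a supplementary character consumes two chars but contributes one code point. The same loop, lifted into a runnable snippet:

    public class CodePointsDemo {
      public static void main(String[] args) {
        String input = "a\uD83D\uDE00b"; // 'a', U+1F600 as a surrogate pair, 'b'
        int length = Character.codePointCount(input, 0, input.length());
        int[] word = new int[length];
        for (int i = 0, j = 0, cp = 0; i < input.length(); i += Character.charCount(cp)) {
          word[j++] = cp = input.codePointAt(i);
        }
        // prints "4 chars, 3 code points"
        System.out.println(input.length() + " chars, " + word.length + " code points");
      }
    }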
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java b/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java
index a2b499edf36..aae949aebe1 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java
@@ -188,6 +188,6 @@ public final class DirectWriter {
     return roundBits(PackedInts.unsignedBitsRequired(maxValue));
   }
 
-  static final int SUPPORTED_BITS_PER_VALUE[] =
+  static final int[] SUPPORTED_BITS_PER_VALUE =
       new int[] {1, 2, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64};
 }
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
index 6d79dc59375..044f0d2dfcf 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
@@ -113,7 +113,7 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
   public void testHugeDoc() throws IOException {
     StringBuilder sb = new StringBuilder();
-    char whitespace[] = new char[4094];
+    char[] whitespace = new char[4094];
     Arrays.fill(whitespace, ' ');
     sb.append(whitespace);
     sb.append("testing 1234");
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/TestCodecUtil.java b/lucene/core/src/test/org/apache/lucene/codecs/TestCodecUtil.java
index 3c8717aaa68..26f00c3c2ee 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/TestCodecUtil.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/TestCodecUtil.java
@@ -117,7 +117,7 @@ public class TestCodecUtil extends LuceneTestCase {
               CodecUtil.checkFooter(input, mine);
             });
     assertEquals("fake exception", expected.getMessage());
-    Throwable suppressed[] = expected.getSuppressed();
+    Throwable[] suppressed = expected.getSuppressed();
     assertEquals(1, suppressed.length);
     assertTrue(suppressed[0].getMessage().contains("checksum passed"));
     input.close();
@@ -143,7 +143,7 @@ public class TestCodecUtil extends LuceneTestCase {
               CodecUtil.checkFooter(input, mine);
             });
     assertEquals("fake exception", expected.getMessage());
-    Throwable suppressed[] = expected.getSuppressed();
+    Throwable[] suppressed = expected.getSuppressed();
     assertEquals(1, suppressed.length);
     assertTrue(suppressed[0].getMessage().contains("checksum passed"));
     input.close();
@@ -171,7 +171,7 @@ public class TestCodecUtil extends LuceneTestCase {
               CodecUtil.checkFooter(input, mine);
             });
     assertTrue(expected.getMessage().contains("checksum status indeterminate"));
-    Throwable suppressed[] = expected.getSuppressed();
+    Throwable[] suppressed = expected.getSuppressed();
     assertEquals(1, suppressed.length);
     assertEquals("fake exception", suppressed[0].getMessage());
     input.close();
@@ -199,7 +199,7 @@ public class TestCodecUtil extends LuceneTestCase {
               CodecUtil.checkFooter(input, mine);
             });
     assertTrue(expected.getMessage().contains("checksum failed"));
-    Throwable suppressed[] = expected.getSuppressed();
+    Throwable[] suppressed = expected.getSuppressed();
     assertEquals(1, suppressed.length);
     assertEquals("fake exception", suppressed[0].getMessage());
     input.close();
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java
index 33015825902..1439d0862d6 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java
@@ -655,7 +655,7 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
       Document doc = new Document();
       int valueCount = (int) counts.getAsLong();
-      long valueArray[] = new long[valueCount];
+      long[] valueArray = new long[valueCount];
       for (int j = 0; j < valueCount; j++) {
         long value = values.getAsLong();
         valueArray[j] = value;
@@ -685,12 +685,12 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
       if (i > docValues.docID()) {
         docValues.nextDoc();
       }
-      String expectedStored[] = r.document(i).getValues("stored");
+      String[] expectedStored = r.document(i).getValues("stored");
       if (i < docValues.docID()) {
         assertEquals(0, expectedStored.length);
       } else {
         long[] readValueArray = new long[docValues.docValueCount()];
-        String actualDocValue[] = new String[docValues.docValueCount()];
+        String[] actualDocValue = new String[docValues.docValueCount()];
         for (int j = 0; j < docValues.docValueCount(); ++j) {
           long actualDV = docValues.nextValue();
           readValueArray[j] = actualDV;
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestField.java b/lucene/core/src/test/org/apache/lucene/document/TestField.java
index 4f57a21ed2a..f187a069744 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestField.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestField.java
@@ -304,7 +304,7 @@ public class TestField extends LuceneTestCase {
   }
 
   public void testStringField() throws Exception {
-    Field fields[] =
+    Field[] fields =
         new Field[] {
           new StringField("foo", "bar", Field.Store.NO),
           new StringField("foo", "bar", Field.Store.YES)
@@ -328,7 +328,7 @@
   }
 
   public void testTextFieldString() throws Exception {
-    Field fields[] =
+    Field[] fields =
         new Field[] {
           new TextField("foo", "bar", Field.Store.NO), new TextField("foo", "bar", Field.Store.YES)
         };
@@ -375,7 +375,7 @@
    */
   public void testStoredFieldBytes() throws Exception {
-    Field fields[] =
+    Field[] fields =
         new Field[] {
           new StoredField("foo", "bar".getBytes(StandardCharsets.UTF_8)),
           new StoredField("foo", "bar".getBytes(StandardCharsets.UTF_8), 0, 3),
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java b/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java
index 5368bd3ac08..24b9a3b93ab 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java
@@ -214,7 +214,7 @@ public class TestLatLonPointDistanceSort extends LuceneTestCase {
       double lon = GeoTestUtil.nextLongitude();
       double missingValue = Double.POSITIVE_INFINITY;
 
-      Result expected[] = new Result[reader.maxDoc()];
+      Result[] expected = new Result[reader.maxDoc()];
 
       for (int doc = 0; doc < reader.maxDoc(); doc++) {
         Document targetDoc = reader.document(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLatLonShape.java b/lucene/core/src/test/org/apache/lucene/document/TestLatLonShape.java
index e3d643acd5e..51cfef99b82 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestLatLonShape.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestLatLonShape.java
@@ -125,8 +125,8 @@ public class TestLatLonShape extends LuceneTestCase {
     // add a line document
     document = new Document();
     // add a line string
-    double lats[] = new double[p.numPoints() - 1];
-    double lons[] = new double[p.numPoints() - 1];
+    double[] lats = new double[p.numPoints() - 1];
+    double[] lons = new double[p.numPoints() - 1];
     for (int i = 0; i < lats.length; ++i) {
       lats[i] = p.getPolyLat(i);
       lons[i] = p.getPolyLon(i);
@@ -169,8 +169,8 @@ public class TestLatLonShape extends LuceneTestCase {
     // add a line document
     document = new Document();
     // add a line string
-    double lats[] = new double[p.numPoints() - 1];
-    double lons[] = new double[p.numPoints() - 1];
+    double[] lats = new double[p.numPoints() - 1];
+    double[] lons = new double[p.numPoints() - 1];
     for (int i = 0; i < lats.length; ++i) {
       lats[i] = p.getPolyLat(i);
       lons[i] = p.getPolyLon(i);
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestXYShape.java b/lucene/core/src/test/org/apache/lucene/document/TestXYShape.java
index 0e577b0e4ff..4ffbe61d105 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestXYShape.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestXYShape.java
@@ -75,8 +75,8 @@ public class TestXYShape extends LuceneTestCase {
     // add a line document
     document = new Document();
     // add a line string
-    float x[] = new float[p.numPoints() - 1];
-    float y[] = new float[p.numPoints() - 1];
+    float[] x = new float[p.numPoints() - 1];
+    float[] y = new float[p.numPoints() - 1];
     for (int i = 0; i < x.length; ++i) {
       x[i] = p.getPolyX(i);
       y[i] = p.getPolyY(i);
diff --git a/lucene/core/src/test/org/apache/lucene/geo/TestGeoUtils.java b/lucene/core/src/test/org/apache/lucene/geo/TestGeoUtils.java
index 8792570e767..27a76a56132 100644
--- a/lucene/core/src/test/org/apache/lucene/geo/TestGeoUtils.java
+++ b/lucene/core/src/test/org/apache/lucene/geo/TestGeoUtils.java
@@ -51,7 +51,7 @@ public class TestGeoUtils extends LuceneTestCase {
     int numPointsToTry = 1000;
     for (int i = 0; i < numPointsToTry; i++) {
-      double point[] = GeoTestUtil.nextPointNear(bbox);
+      double[] point = GeoTestUtil.nextPointNear(bbox);
       double lat = point[0];
       double lon = point[1];
@@ -123,7 +123,7 @@ public class TestGeoUtils extends LuceneTestCase {
       }
 
       for (int j = 0; j < 1000; j++) {
-        double point[] = GeoTestUtil.nextPointNear(box);
+        double[] point = GeoTestUtil.nextPointNear(box);
         double lat2 = point[0];
         double lon2 = point[1];
         // if the point is within radius, then it should be in our bounding box
@@ -153,7 +153,7 @@ public class TestGeoUtils extends LuceneTestCase {
           SloppyMath.haversinSortKey(lat, lon, box.maxLat, lon));
 
       for (int j = 0; j < 10000; j++) {
-        double point[] = GeoTestUtil.nextPointNear(box);
+        double[] point = GeoTestUtil.nextPointNear(box);
         double lat2 = point[0];
         double lon2 = point[1];
         // if the point is within radius, then it should be <= our sort key
diff --git a/lucene/core/src/test/org/apache/lucene/geo/TestPolygon2D.java b/lucene/core/src/test/org/apache/lucene/geo/TestPolygon2D.java
index 0de43591f5f..c39e63b3408 100644
--- a/lucene/core/src/test/org/apache/lucene/geo/TestPolygon2D.java
+++ b/lucene/core/src/test/org/apache/lucene/geo/TestPolygon2D.java
@@ -98,7 +98,7 @@ public class TestPolygon2D extends LuceneTestCase {
       Component2D impl = Polygon2D.create(polygon);
 
       for (int j = 0; j < 100; j++) {
-        double point[] = GeoTestUtil.nextPointNear(polygon);
+        double[] point = GeoTestUtil.nextPointNear(polygon);
         double latitude = point[0];
         double longitude = point[1];
         // if the point is within poly, then it should be in our bounding box
@@ -127,7 +127,7 @@ public class TestPolygon2D extends LuceneTestCase {
         for (int k = 0; k < 500; k++) {
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
-          double point[] = GeoTestUtil.nextPointNear(rectangle);
+          double[] point = GeoTestUtil.nextPointNear(rectangle);
           double latitude = point[0];
           double longitude = point[1];
           // check for sure its in our box
@@ -141,7 +141,7 @@ public class TestPolygon2D extends LuceneTestCase {
         for (int k = 0; k < 100; k++) {
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
-          double point[] = GeoTestUtil.nextPointNear(polygon);
+          double[] point = GeoTestUtil.nextPointNear(polygon);
           double latitude = point[0];
           double longitude = point[1];
           // check for sure its in our box
@@ -176,7 +176,7 @@ public class TestPolygon2D extends LuceneTestCase {
         for (int k = 0; k < 100; k++) {
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
-          double point[] = GeoTestUtil.nextPointNear(rectangle);
+          double[] point = GeoTestUtil.nextPointNear(rectangle);
           double latitude = point[0];
           double longitude = point[1];
           // check for sure its in our box
@@ -190,7 +190,7 @@ public class TestPolygon2D extends LuceneTestCase {
         for (int k = 0; k < 20; k++) {
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
-          double point[] = GeoTestUtil.nextPointNear(polygon);
+          double[] point = GeoTestUtil.nextPointNear(polygon);
           double latitude = point[0];
           double longitude = point[1];
           // check for sure its in our box
@@ -223,7 +223,7 @@ public class TestPolygon2D extends LuceneTestCase {
       if (impl.relate(rectangle.minLon, rectangle.maxLon, rectangle.minLat, rectangle.maxLat)
           == Relation.CELL_OUTSIDE_QUERY) {
         for (int k = 0; k < 1000; k++) {
-          double point[] = GeoTestUtil.nextPointNear(rectangle);
+          double[] point = GeoTestUtil.nextPointNear(rectangle);
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
           double latitude = point[0];
@@ -237,7 +237,7 @@ public class TestPolygon2D extends LuceneTestCase {
         }
       }
       for (int k = 0; k < 100; k++) {
-        double point[] = GeoTestUtil.nextPointNear(polygon);
+        double[] point = GeoTestUtil.nextPointNear(polygon);
         // this tests in our range but sometimes outside! so we have to double-check its really
         // in other box
         double latitude = point[0];
@@ -275,7 +275,7 @@ public class TestPolygon2D extends LuceneTestCase {
         for (int k = 0; k < 100; k++) {
           // this tests in our range but sometimes outside! so we have to double-check its really
           // in other box
-          double point[] = GeoTestUtil.nextPointNear(rectangle);
+          double[] point = GeoTestUtil.nextPointNear(rectangle);
           double latitude = point[0];
           double longitude = point[1];
           // check for sure its in our box
@@ -289,7 +289,7 @@ public class TestPolygon2D extends LuceneTestCase {
        for (int k = 0; k < 50; k++) {
          // this tests in our range but sometimes outside! so we have to double-check its really
          // in other box
-          double point[] = GeoTestUtil.nextPointNear(polygon);
+          double[] point = GeoTestUtil.nextPointNear(polygon);
          double latitude = point[0];
          double longitude = point[1];
          // check for sure its in our box
@@ -368,7 +368,7 @@ public class TestPolygon2D extends LuceneTestCase {
       // random lat/lons against polygon
       for (int j = 0; j < 1000; j++) {
-        double point[] = GeoTestUtil.nextPointNear(polygon);
+        double[] point = GeoTestUtil.nextPointNear(polygon);
         double latitude = point[0];
         double longitude = point[1];
         boolean expected = GeoTestUtil.containsSlowly(polygon, longitude, latitude);
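The recurring "double-check its really in other box" comments in the TestPolygon2D hunks describe the shape of the test: GeoTestUtil.nextPointNear(...) may return a point outside the target rectangle, so containment must be re-verified before asserting anything about the polygon. A condensed sketch of that property check (the assertFalse/contains usage is an assumption pieced together from the visible relate(...) call, not a copy of the real test body):

    // sketch: if a cell relates as CELL_OUTSIDE_QUERY, no point truly inside
    // that cell may be contained by the polygon
    static void checkDisjointCell(Component2D impl, Rectangle rect) {
      if (impl.relate(rect.minLon, rect.maxLon, rect.minLat, rect.maxLat)
          == Relation.CELL_OUTSIDE_QUERY) {
        for (int k = 0; k < 1000; k++) {
          double[] point = GeoTestUtil.nextPointNear(rect);
          double latitude = point[0];
          double longitude = point[1];
          // nextPointNear may land outside the rectangle: double-check first
          if (latitude >= rect.minLat && latitude <= rect.maxLat
              && longitude >= rect.minLon && longitude <= rect.maxLon) {
            assertFalse(impl.contains(longitude, latitude));
          }
        }
      }
    }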
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
index 14faaae13e8..d6fd95a38a6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
@@ -59,7 +59,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase {
                 .setCodec(TestUtil.getDefaultCodec()));
     Document doc = new Document();
-    byte bytes[] = new byte[4];
+    byte[] bytes = new byte[4];
     BytesRef data = new BytesRef(bytes);
     BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);
     doc.add(dvField);
@@ -122,7 +122,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase {
                 .setCodec(TestUtil.getDefaultCodec()));
     Document doc = new Document();
-    byte bytes[] = new byte[4];
+    byte[] bytes = new byte[4];
     ByteArrayDataOutput encoder = new ByteArrayDataOutput(bytes);
     BytesRef data = new BytesRef(bytes);
     BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
index 08f8b0f8412..ff932ad0b8f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
@@ -110,7 +110,7 @@ public class Test2BPostingsBytes extends LuceneTestCase {
     w.close();
 
     DirectoryReader oneThousand = DirectoryReader.open(dir);
-    DirectoryReader subReaders[] = new DirectoryReader[1000];
+    DirectoryReader[] subReaders = new DirectoryReader[1000];
     Arrays.fill(subReaders, oneThousand);
     BaseDirectoryWrapper dir2 = newFSDirectory(createTempDir("2BPostingsBytes2"));
     if (dir2 instanceof MockDirectoryWrapper) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
index 2fa0dbd4d88..daf662d7bb5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
@@ -57,7 +57,7 @@ public class Test2BSortedDocValuesFixedSorted extends LuceneTestCase {
                 .setCodec(TestUtil.getDefaultCodec()));
     Document doc = new Document();
-    byte bytes[] = new byte[2];
+    byte[] bytes = new byte[2];
     BytesRef data = new BytesRef(bytes);
     SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
     doc.add(dvField);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
index a91695cdac9..a314a8bb5a4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
@@ -57,7 +57,7 @@ public class Test2BSortedDocValuesOrds extends LuceneTestCase {
                 .setCodec(TestUtil.getDefaultCodec()));
     Document doc = new Document();
-    byte bytes[] = new byte[4];
+    byte[] bytes = new byte[4];
     BytesRef data = new BytesRef(bytes);
     SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
     doc.add(dvField);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
index ed48a0754cf..1771faca801 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
@@ -361,7 +361,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
     iwriter.addDocument(doc);
 
     Document hugeDoc = new Document();
-    byte bytes[] = new byte[100000];
+    byte[] bytes = new byte[100000];
     BytesRef b = newBytesRef(bytes);
     random().nextBytes(bytes);
     hugeDoc.add(new SortedDocValuesField("dv", b));
@@ -391,7 +391,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
     iwriter.addDocument(doc);
 
     Document hugeDoc = new Document();
-    byte bytes[] = new byte[100000];
+    byte[] bytes = new byte[100000];
     BytesRef b = newBytesRef(bytes);
     random().nextBytes(bytes);
     hugeDoc.add(new SortedSetDocValuesField("dv", b));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
index 18579e462bf..db8abfcd83e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
@@ -108,7 +108,7 @@ public class TestDuelingCodecs extends LuceneTestCase {
       Document document = lineFileDocs.nextDoc();
       // grab the title and add some SortedSet instances for fun
       String title = document.get("titleTokenized");
-      String split[] = title.split("\\s+");
+      String[] split = title.split("\\s+");
       document.removeFields("sortedset");
       for (String trash : split) {
         document.add(new SortedSetDocValuesField("sortedset", new BytesRef(trash)));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
index 4fcc3e67963..7c2cbb93282 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -112,7 +112,7 @@ public class TestIndexFileDeleter extends LuceneTestCase {
     // non-existent segment:
     copyFile(dir, "_0_1" + ext, "_188_1" + ext);
 
-    String cfsFiles0[] =
+    String[] cfsFiles0 =
         si0.getCodec() instanceof SimpleTextCodec
             ? new String[] {"_0.scf"}
            : new String[] {"_0.cfs", "_0.cfe"};
@@ -128,7 +128,7 @@ public class TestIndexFileDeleter extends LuceneTestCase {
     // TODO: assert is bogus (relies upon codec-specific filenames)
     assertTrue(slowFileExists(dir, "_3.fdt") || slowFileExists(dir, "_3.fld"));
 
-    String cfsFiles3[] =
+    String[] cfsFiles3 =
         si3.getCodec() instanceof SimpleTextCodec
             ? new String[] {"_3.scf"}
             : new String[] {"_3.cfs", "_3.cfe"};
@@ -136,7 +136,7 @@ public class TestIndexFileDeleter extends LuceneTestCase {
       assertTrue(!slowFileExists(dir, f));
     }
 
-    String cfsFiles1[] =
+    String[] cfsFiles1 =
         si1.getCodec() instanceof SimpleTextCodec
             ? new String[] {"_1.scf"}
             : new String[] {"_1.cfs", "_1.cfe"};
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index f18f82c7fc6..cc8d50fccc3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -1480,7 +1480,7 @@ public class TestIndexWriter extends LuceneTestCase {
     // After rollback, IW should remove all files
     writer.rollback();
-    String allFiles[] = dir.listAll();
+    String[] allFiles = dir.listAll();
     assertEquals(
         "no files should exist in the directory after rollback",
         origFiles.length + extraFileCount,
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index 3fb6bd89d36..fc2aca1fae5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -1058,7 +1058,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
               + "",
           failure.failOnCommit && (failure.failOnDeleteFile || failure.failOnSyncMetadata));
       w.rollback();
-      String files[] = dir.listAll();
+      String[] files = dir.listAll();
       assertTrue(
           files.length == fileCount
               || (files.length == fileCount + 1
@@ -1668,7 +1668,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
             NullPointerException.class,
             () -> {
               // set to null value
-              byte v[] = null;
+              byte[] v = null;
               Field theField = new StoredField("foo", v);
               doc.add(theField);
               iw.addDocument(doc);
@@ -1697,7 +1697,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
             NullPointerException.class,
             () -> {
              // set to null value
-              byte v[] = null;
+              byte[] v = null;
               theField.setBytesValue(v);
               iw.addDocument(doc);
             });
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
index 7a68585fbb0..e4a54634944 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
@@ -325,7 +325,7 @@ public class TestIndexWriterMaxDocs extends LuceneTestCase {
     DirectoryReader ir = DirectoryReader.open(dir);
     DirectoryReader ir2 = DirectoryReader.open(dir2);
-    IndexReader subReaders[] = new IndexReader[copies + 1];
+    IndexReader[] subReaders = new IndexReader[copies + 1];
     Arrays.fill(subReaders, ir);
     subReaders[subReaders.length - 1] = ir2;
@@ -364,7 +364,7 @@ public class TestIndexWriterMaxDocs extends LuceneTestCase {
     DirectoryReader ir = DirectoryReader.open(dir);
     DirectoryReader ir2 = DirectoryReader.open(dir2);
-    IndexReader subReaders[] = new IndexReader[copies + 1];
+    IndexReader[] subReaders = new IndexReader[copies + 1];
     Arrays.fill(subReaders, ir);
     subReaders[subReaders.length - 1] = ir2;
@@ -400,7 +400,7 @@ public class TestIndexWriterMaxDocs extends LuceneTestCase {
     w = new IndexWriter(dir2, new IndexWriterConfig(null));
     w.commit(); // don't confuse checkindex
     dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536); // 64KB
-    Directory dirs[] = new Directory[1 + (IndexWriter.MAX_DOCS / 100000)];
+    Directory[] dirs = new Directory[1 + (IndexWriter.MAX_DOCS / 100000)];
     for (int i = 0; i < dirs.length; i++) {
       // bypass iw check for duplicate dirs
       dirs[i] = new FilterDirectory(dir) {};
@@ -453,7 +453,7 @@ public class TestIndexWriterMaxDocs extends LuceneTestCase {
     IndexReader r = DirectoryReader.open(dir);
     CodecReader segReader = (CodecReader) r.leaves().get(0).reader();
 
-    CodecReader readers[] = new CodecReader[1 + (IndexWriter.MAX_DOCS / 100000)];
+    CodecReader[] readers = new CodecReader[1 + (IndexWriter.MAX_DOCS / 100000)];
     for (int i = 0; i < readers.length; i++) {
       readers[i] = segReader;
     }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index 23ad6ed028b..d54f08fdcef 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -328,7 +328,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
           }
           writer.forceMerge(1);
         } else if (1 == method) {
-          DirectoryReader readers[] = new DirectoryReader[dirs.length];
+          DirectoryReader[] readers = new DirectoryReader[dirs.length];
           for (int i = 0; i < dirs.length; i++) {
             readers[i] = DirectoryReader.open(dirs[i]);
           }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
index da15e8452bd..78c3f1b780a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
@@ -359,7 +359,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
           String segName = IndexFileNames.parseSegmentName(fileName);
           if (segSeen.contains(segName) == false) {
             segSeen.add(segName);
-            byte id[] = readSegmentInfoID(dir, fileName);
+            byte[] id = readSegmentInfoID(dir, fileName);
             SegmentInfo si =
                 TestUtil.getDefaultCodec()
                     .segmentInfoFormat()
@@ -394,7 +394,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
       in.readInt(); // magic
       in.readString(); // codec name
       in.readInt(); // version
-      byte id[] = new byte[StringHelper.ID_LENGTH];
+      byte[] id = new byte[StringHelper.ID_LENGTH];
       in.readBytes(id, 0, id.length);
       return id;
     }
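The readSegmentInfoID(...) hunk just above doubles as a compact description of the .si file header the test relies on; annotated, the reads are:

    // IndexInput `in` positioned at the start of a .si file (same calls as the hunk above)
    in.readInt();    // header magic
    in.readString(); // codec name
    in.readInt();    // codec version
    byte[] id = new byte[StringHelper.ID_LENGTH]; // the 16-byte segment ID being extracted
    in.readBytes(id, 0, id.length);
    return id;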
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index ec25b96e329..80ff34536d4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -293,7 +293,7 @@ public class TestPayloads extends LuceneTestCase {
   private void generateRandomData(byte[] data) {
     // this test needs the random data to be valid unicode
     String s = TestUtil.randomFixedByteLengthUnicodeString(random(), data.length);
-    byte b[] = s.getBytes(utf8);
+    byte[] b = s.getBytes(utf8);
     assert b.length == data.length;
     System.arraycopy(b, 0, data, 0, b.length);
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index 2187e425386..f38168762d5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -149,7 +149,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
     IndexReader reader = w.getReader();
     w.close();
 
-    String terms[] = {
+    String[] terms = {
       "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred"
     };
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
index 7a1f27e6d70..d888cf5ec98 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
@@ -63,10 +63,10 @@ final class BugReproTokenStream extends TokenStream {
       addAttribute(PositionIncrementAttribute.class);
   private static final int TOKEN_COUNT = 4;
   private int nextTokenIndex = 0;
-  private final String terms[] = new String[] {"six", "six", "drunken", "drunken"};
-  private final int starts[] = new int[] {0, 0, 4, 4};
-  private final int ends[] = new int[] {3, 3, 11, 11};
-  private final int incs[] = new int[] {1, 0, 1, 0};
+  private final String[] terms = new String[] {"six", "six", "drunken", "drunken"};
+  private final int[] starts = new int[] {0, 0, 4, 4};
+  private final int[] ends = new int[] {3, 3, 11, 11};
+  private final int[] incs = new int[] {1, 0, 1, 0};
 
   @Override
   public boolean incrementToken() {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentInfos.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentInfos.java
index b1665eaa74c..91e537a8a8c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentInfos.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentInfos.java
@@ -64,7 +64,7 @@ public class TestSegmentInfos extends LuceneTestCase {
   public void testVersionsOneSegment() throws IOException {
     BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false);
-    byte id[] = StringHelper.randomId();
+    byte[] id = StringHelper.randomId();
     Codec codec = Codec.getDefault();
 
     SegmentInfos sis = new SegmentInfos(Version.LATEST.major);
@@ -98,7 +98,7 @@ public class TestSegmentInfos extends LuceneTestCase {
   public void testVersionsTwoSegments() throws IOException {
     BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false);
-    byte id[] = StringHelper.randomId();
+    byte[] id = StringHelper.randomId();
     Codec codec = Codec.getDefault();
 
     SegmentInfos sis = new SegmentInfos(Version.LATEST.major);
@@ -266,7 +266,7 @@ public class TestSegmentInfos extends LuceneTestCase {
   public void testIDChangesOnAdvance() throws IOException {
     try (BaseDirectoryWrapper dir = newDirectory()) {
       dir.setCheckIndexOnClose(false);
-      byte id[] = StringHelper.randomId();
+      byte[] id = StringHelper.randomId();
       SegmentInfo info =
           new SegmentInfo(
               dir,
@@ -309,7 +309,7 @@ public class TestSegmentInfos extends LuceneTestCase {
   public void testBitFlippedTriggersCorruptIndexException() throws IOException {
     BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false);
-    byte id[] = StringHelper.randomId();
+    byte[] id = StringHelper.randomId();
     Codec codec = Codec.getDefault();
 
     SegmentInfos sis = new SegmentInfos(Version.LATEST.major);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index 393ce928a31..a4e84c31aff 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -194,7 +194,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
   }
 
   public void testHashCodeWithThreads() throws Exception {
-    final AutomatonQuery queries[] = new AutomatonQuery[atLeast(100)];
+    final AutomatonQuery[] queries = new AutomatonQuery[atLeast(100)];
     for (int i = 0; i < queries.length; i++) {
       queries[i] =
           new AutomatonQuery(
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 71e06f0535c..f2290907775 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -264,7 +264,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
   private static class ScorerSummarizingCollector implements Collector {
     private final List summaries = new ArrayList<>();
-    private int numHits[] = new int[1];
+    private int[] numHits = new int[1];
 
     public int getNumHits() {
       return numHits[0];
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java b/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
index 73535777f56..42b8b0bd16d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
@@ -162,7 +162,7 @@ public class TestDoubleValuesSource extends LuceneTestCase {
   Sort randomSort() throws Exception {
     boolean reversed = random().nextBoolean();
-    SortField fields[] =
+    SortField[] fields =
         new SortField[] {
           new SortField("int", SortField.Type.INT, reversed),
           new SortField("long", SortField.Type.LONG, reversed),
@@ -177,8 +177,8 @@ public class TestDoubleValuesSource extends LuceneTestCase {
   // Take a Sort, and replace any field sorts with Sortables
   Sort convertSortToSortable(Sort sort) {
-    SortField original[] = sort.getSort();
-    SortField mutated[] = new SortField[original.length];
+    SortField[] original = sort.getSort();
+    SortField[] mutated = new SortField[original.length];
     for (int i = 0; i < mutated.length; i++) {
       if (random().nextInt(3) > 0) {
         SortField s = original[i];
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
index ad5ba73a4a4..7be05292bce 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
@@ -318,7 +318,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
     searcher.setSimilarity(
         new ClassicSimilarity()); // avoid randomisation of similarity algo by test framework
     writer.close();
-    String searchTerms[] = {"smith", "smythe", "smdssasd"};
+    String[] searchTerms = {"smith", "smythe", "smdssasd"};
     for (String searchTerm : searchTerms) {
       FuzzyQuery query = new FuzzyQuery(new Term("field", searchTerm), 2, 1);
       ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
@@ -757,7 +757,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
     IntsRef targetPoints;
     IntsRef otherPoints;
     int n;
-    int d[][]; // cost array
+    int[][] d; // cost array
 
     // NOTE: if we cared, we could 3*m space instead of m*n space, similar to
     // what LevenshteinDistance does, except cycling thru a ring of three
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
index 4de410f8691..bfd9c2c1fc6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
@@ -94,11 +94,11 @@ public class TestIndexSearcher extends LuceneTestCase {
             new LinkedBlockingQueue(),
             new NamedThreadFactory("TestIndexSearcher"));
 
-    IndexSearcher searchers[] =
+    IndexSearcher[] searchers =
         new IndexSearcher[] {new IndexSearcher(reader), new IndexSearcher(reader, service)};
-    Query queries[] = new Query[] {new MatchAllDocsQuery(), new TermQuery(new Term("field", "1"))};
-    Sort sorts[] = new Sort[] {null, new Sort(new SortField("field2", SortField.Type.STRING))};
-    ScoreDoc afters[] =
+    Query[] queries = new Query[] {new MatchAllDocsQuery(), new TermQuery(new Term("field", "1"))};
+    Sort[] sorts = new Sort[] {null, new Sort(new SortField("field2", SortField.Type.STRING))};
+    ScoreDoc[] afters =
         new ScoreDoc[] {null, new FieldDoc(0, 0f, new Object[] {newBytesRef("boo!")})};
 
     for (IndexSearcher searcher : searchers) {
@@ -419,9 +419,9 @@ public class TestIndexSearcher extends LuceneTestCase {
     IndexSearcher searcher = new IndexSearcher(reader.getContext(), service, sliceExecutor);
 
-    Query queries[] = new Query[] {new MatchAllDocsQuery(), new TermQuery(new Term("field", "1"))};
-    Sort sorts[] = new Sort[] {null, new Sort(new SortField("field2", SortField.Type.STRING))};
-    ScoreDoc afters[] =
+    Query[] queries = new Query[] {new MatchAllDocsQuery(), new TermQuery(new Term("field", "1"))};
+    Sort[] sorts = new Sort[] {null, new Sort(new SortField("field2", SortField.Type.STRING))};
+    ScoreDoc[] afters =
         new ScoreDoc[] {null, new FieldDoc(0, 0f, new Object[] {newBytesRef("boo!")})};
 
     for (ScoreDoc after : afters) {
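The searchers array in the TestIndexSearcher hunks above pairs a plain IndexSearcher with one constructed over an executor, then runs identical query/sort/paging combinations through both. The two constructions, reduced to a fragment (assumes an already-open DirectoryReader `reader`; pool shutdown omitted):

    ExecutorService service = Executors.newFixedThreadPool(4);
    IndexSearcher sequential = new IndexSearcher(reader);
    IndexSearcher concurrent = new IndexSearcher(reader, service); // fans work out per leaf slice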
"y", "z" }; @@ -101,7 +101,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { dir = null; } - private static void addSome(Document doc, String values[]) { + private static void addSome(Document doc, String[] values) { List list = Arrays.asList(values); Collections.shuffle(list, random()); int howMany = TestUtil.nextInt(random(), 1, list.size()); @@ -111,7 +111,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { } } - private Scorer scorer(String values[], int minShouldMatch, Mode mode) throws Exception { + private Scorer scorer(String[] values, int minShouldMatch, Mode mode) throws Exception { BooleanQuery.Builder bq = new BooleanQuery.Builder(); for (String value : values) { bq.add(new TermQuery(new Term("field", value)), BooleanClause.Occur.SHOULD); @@ -250,7 +250,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); - String terms[] = termsList.toArray(new String[0]); + String[] terms = termsList.toArray(new String[0]); for (int minNrShouldMatch = 1; minNrShouldMatch <= terms.length; minNrShouldMatch++) { Scorer expected = scorer(terms, minNrShouldMatch, Mode.DOC_VALUES); @@ -269,7 +269,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); - String terms[] = termsList.toArray(new String[0]); + String[] terms = termsList.toArray(new String[0]); for (int amount = 25; amount < 200; amount += 25) { for (int minNrShouldMatch = 1; minNrShouldMatch <= terms.length; minNrShouldMatch++) { @@ -292,7 +292,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { termsList.addAll(Arrays.asList(rareTerms)); Collections.shuffle(termsList, random()); for (int numTerms = 2; numTerms <= termsList.size(); numTerms++) { - String terms[] = termsList.subList(0, numTerms).toArray(new String[0]); + String[] terms = termsList.subList(0, numTerms).toArray(new String[0]); for (int minNrShouldMatch = 1; minNrShouldMatch <= terms.length; minNrShouldMatch++) { Scorer expected = scorer(terms, minNrShouldMatch, Mode.DOC_VALUES); Scorer actual = scorer(terms, minNrShouldMatch, Mode.SCORER); @@ -316,7 +316,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { for (int amount = 25; amount < 200; amount += 25) { for (int numTerms = 2; numTerms <= termsList.size(); numTerms++) { - String terms[] = termsList.subList(0, numTerms).toArray(new String[0]); + String[] terms = termsList.subList(0, numTerms).toArray(new String[0]); for (int minNrShouldMatch = 1; minNrShouldMatch <= terms.length; minNrShouldMatch++) { Scorer expected = scorer(terms, minNrShouldMatch, Mode.DOC_VALUES); Scorer actual = scorer(terms, minNrShouldMatch, Mode.SCORER); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index ab5e424e108..1e7a238394b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -505,7 +505,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { /** MPQ Combined AND OR Mode - Manually creating a multiple phrase query */ public void testZeroPosIncrSloppyMpqAndOrMatch() throws IOException { final MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder(); - for (Token tap[] : 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
index ab5e424e108..1e7a238394b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -505,7 +505,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
   /** MPQ Combined AND OR Mode - Manually creating a multiple phrase query */
   public void testZeroPosIncrSloppyMpqAndOrMatch() throws IOException {
     final MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
-    for (Token tap[] : INCR_0_QUERY_TOKENS_AND_OR_MATCH) {
+    for (Token[] tap : INCR_0_QUERY_TOKENS_AND_OR_MATCH) {
       Term[] terms = tapTerms(tap);
       final int pos = tap[0].getPositionIncrement() - 1;
       mpqb.add(terms, pos); // AND logic in pos, OR across lines
@@ -520,7 +520,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
   /** MPQ Combined AND OR Mode - Manually creating a multiple phrase query - with no match */
   public void testZeroPosIncrSloppyMpqAndOrNoMatch() throws IOException {
     final MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
-    for (Token tap[] : INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN) {
+    for (Token[] tap : INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN) {
       Term[] terms = tapTerms(tap);
       final int pos = tap[0].getPositionIncrement() - 1;
       mpqb.add(terms, pos); // AND logic in pos, OR across lines
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiSliceMerge.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiSliceMerge.java
index a8199c51b07..495604f8a8c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiSliceMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiSliceMerge.java
@@ -88,7 +88,7 @@ public class TestMultiSliceMerge extends LuceneTestCase {
     Executor executor1 = runnable -> runnable.run();
     Executor executor2 = runnable -> runnable.run();
 
-    IndexSearcher searchers[] =
+    IndexSearcher[] searchers =
         new IndexSearcher[] {
           new IndexSearcher(reader1, executor1), new IndexSearcher(reader2, executor2)
         };
@@ -105,7 +105,7 @@ public class TestMultiSliceMerge extends LuceneTestCase {
     Executor executor1 = runnable -> runnable.run();
     Executor executor2 = runnable -> runnable.run();
 
-    IndexSearcher searchers[] =
+    IndexSearcher[] searchers =
         new IndexSearcher[] {
           new IndexSearcher(reader1, executor1), new IndexSearcher(reader2, executor2)
         };
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
index 1c1d0a3e34a..723f6772e05 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
@@ -252,7 +252,7 @@ public class TestPointQueries extends LuceneTestCase {
     assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.NaN)));
 
     // set query
-    double set[] =
+    double[] set =
         new double[] {
           Double.MAX_VALUE,
           Double.NaN,
@@ -325,7 +325,7 @@ public class TestPointQueries extends LuceneTestCase {
     assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.NaN)));
 
     // set query
-    float set[] =
+    float[] set =
         new float[] {
           Float.MAX_VALUE,
           Float.NaN,
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java
index 5a0472325a0..cfa590174d7 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java
@@ -156,7 +156,7 @@ public class TestSloppyPhraseQuery2 extends SearchEquivalenceTestBase {
     int position = 0;
     for (int i = 0; i < length; i++) {
       int depth = TestUtil.nextInt(random, 1, 3);
-      Term terms[] = new Term[depth];
+      Term[] terms = new Term[depth];
       for (int j = 0; j < depth; j++) {
         terms[j] = new Term("field", "" + (char) TestUtil.nextInt(random, 'a', 'z'));
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index 1e843a97171..43827ee5afc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -68,7 +68,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
     counter = Counter.newCounter(true);
     counterThread = new TimerThread(counter);
     counterThread.start();
-    final String docText[] = {
+    final String[] docText = {
       "docThatNeverMatchesSoWeCanRequireLastDocCollectedToBeGreaterThanZero",
       "one blah three",
       "one foo three multiOne",
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
index b8918268d52..4c5c2db3a11 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
@@ -238,12 +238,12 @@ public class TestWildcard extends LuceneTestCase {
    */
  public void testParsingAndSearching() throws Exception {
    String field = "content";
-    String docs[] = {
+    String[] docs = {
      "\\ abcdefg1", "\\79 hijklmn1", "\\\\ opqrstu1",
    };
 
    // queries that should find all docs
-    Query matchAll[] = {
+    Query[] matchAll = {
      new WildcardQuery(new Term(field, "*")),
      new WildcardQuery(new Term(field, "*1")),
      new WildcardQuery(new Term(field, "**1")),
@@ -256,7 +256,7 @@ public class TestWildcard extends LuceneTestCase {
    };
 
    // queries that should find no docs
-    Query matchNone[] = {
+    Query[] matchNone = {
      new WildcardQuery(new Term(field, "a*h")),
      new WildcardQuery(new Term(field, "a?h")),
      new WildcardQuery(new Term(field, "*a*h")),
@@ -264,7 +264,7 @@ public class TestWildcard extends LuceneTestCase {
      new WildcardQuery(new Term(field, "?a")),
      new WildcardQuery(new Term(field, "a?"))
    };
 
-    PrefixQuery matchOneDocPrefix[][] = {
+    PrefixQuery[][] matchOneDocPrefix = {
      {
        new PrefixQuery(new Term(field, "a")),
        new PrefixQuery(new Term(field, "ab")),
@@ -284,7 +284,7 @@ public class TestWildcard extends LuceneTestCase {
      }, // these should find only doc 2
    };
 
-    WildcardQuery matchOneDocWild[][] = {
+    WildcardQuery[][] matchOneDocWild = {
      {
        new WildcardQuery(new Term(field, "*a*")), // these should find only doc 0
        new WildcardQuery(new Term(field, "*ab*")),
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java b/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java
index 62b560697b0..3586e50648a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java
@@ -213,7 +213,7 @@ public class TestXYPointDistanceSort extends LuceneTestCase {
       float y = ShapeTestUtil.nextFloat(random());
       double missingValue = Double.POSITIVE_INFINITY;
 
-      Result expected[] = new Result[reader.maxDoc()];
+      Result[] expected = new Result[reader.maxDoc()];
 
      for (int doc = 0; doc < reader.maxDoc(); doc++) {
        Document targetDoc = reader.document(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestBufferedChecksum.java b/lucene/core/src/test/org/apache/lucene/store/TestBufferedChecksum.java
index 9044f824003..12f296feed6 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestBufferedChecksum.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestBufferedChecksum.java
@@ -39,7 +39,7 @@ public class TestBufferedChecksum extends LuceneTestCase {
         case 0:
           // update(byte[], int, int)
           int length = random().nextInt(1024);
-          byte bytes[] = new byte[length];
+          byte[] bytes = new byte[length];
          random().nextBytes(bytes);
          c1.update(bytes, 0, bytes.length);
          c2.update(bytes, 0, bytes.length);
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestMmapDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestMmapDirectory.java
index ce96116873c..1ae552b7bce 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestMmapDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestMmapDirectory.java
@@ -53,7 +53,7 @@ public class TestMmapDirectory extends BaseDirectoryTestCase {
     out.close();
     IndexInput in = dir.openInput("test", IOContext.DEFAULT);
     IndexInput clone = in.clone();
-    final byte accum[] = new byte[32 * 1024 * 1024];
+    final byte[] accum = new byte[32 * 1024 * 1024];
     final CountDownLatch shotgun = new CountDownLatch(1);
     Thread t1 =
         new Thread(
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
index 6599ba91fc7..59d2ec7acd5 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
@@ -198,12 +198,12 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
     for (int i = 0; i < 17; i++) {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekEnd"), 1 << i);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
-      byte bytes[] = new byte[1 << i];
+      byte[] bytes = new byte[1 << i];
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput ii = mmapDir.openInput("bytes", newIOContext(random()));
-      byte actual[] = new byte[1 << i];
+      byte[] actual = new byte[1 << i];
       ii.readBytes(actual, 0, actual.length);
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
       ii.seek(1 << i);
@@ -216,13 +216,13 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
     for (int i = 0; i < 17; i++) {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceEnd"), 1 << i);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
-      byte bytes[] = new byte[1 << i];
+      byte[] bytes = new byte[1 << i];
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput slicer = mmapDir.openInput("bytes", newIOContext(random()));
       IndexInput ii = slicer.slice("full slice", 0, bytes.length);
-      byte actual[] = new byte[1 << i];
+      byte[] actual = new byte[1 << i];
       ii.readBytes(actual, 0, actual.length);
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
       ii.seek(1 << i);
@@ -237,17 +237,17 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
     for (int i = 0; i < numIters; i++) {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeeking"), 1 << i);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
-      byte bytes[] = new byte[1 << (i + 1)]; // make sure we switch buffers
+      byte[] bytes = new byte[1 << (i + 1)]; // make sure we switch buffers
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput ii = mmapDir.openInput("bytes", newIOContext(random()));
-      byte actual[] = new byte[1 << (i + 1)]; // first read all bytes
+      byte[] actual = new byte[1 << (i + 1)]; // first read all bytes
       ii.readBytes(actual, 0, actual.length);
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
       for (int sliceStart = 0; sliceStart < bytes.length; sliceStart++) {
         for (int sliceLength = 0; sliceLength < bytes.length - sliceStart; sliceLength++) {
-          byte slice[] = new byte[sliceLength];
+          byte[] slice = new byte[sliceLength];
           ii.seek(sliceStart);
           ii.readBytes(slice, 0, slice.length);
           assertEquals(new BytesRef(bytes, sliceStart, sliceLength), new BytesRef(slice));
@@ -265,12 +265,12 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
     for (int i = 0; i < numIters; i++) {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSlicedSeeking"), 1 << i);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
-      byte bytes[] = new byte[1 << (i + 1)]; // make sure we switch buffers
+      byte[] bytes = new byte[1 << (i + 1)]; // make sure we switch buffers
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput ii = mmapDir.openInput("bytes", newIOContext(random()));
-      byte actual[] = new byte[1 << (i + 1)]; // first read all bytes
+      byte[] actual = new byte[1 << (i + 1)]; // first read all bytes
       ii.readBytes(actual, 0, actual.length);
       ii.close();
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
@@ -291,12 +291,12 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
     for (int i = 0; i < upto; i++) {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSliceOfSlice"), 1 << i);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
-      byte bytes[] = new byte[1 << (i + 1)]; // make sure we switch buffers
+      byte[] bytes = new byte[1 << (i + 1)]; // make sure we switch buffers
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput ii = mmapDir.openInput("bytes", newIOContext(random()));
-      byte actual[] = new byte[1 << (i + 1)]; // first read all bytes
+      byte[] actual = new byte[1 << (i + 1)]; // first read all bytes
       ii.readBytes(actual, 0, actual.length);
       ii.close();
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
@@ -319,7 +319,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
   private void assertSlice(
       byte[] bytes, IndexInput slicer, int outerSliceStart, int sliceStart, int sliceLength)
       throws IOException {
-    byte slice[] = new byte[sliceLength];
+    byte[] slice = new byte[sliceLength];
     IndexInput input = slicer.slice("bytesSlice", sliceStart, slice.length);
     input.readBytes(slice, 0, slice.length);
     input.close();
@@ -375,12 +375,12 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
       MMapDirectory mmapDir = new MMapDirectory(createTempDir("testImplementations"), chunkSize);
       IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
       int size = random().nextInt(chunkSize * 2) + 3; // add some buffer of 3 for slice tests
-      byte bytes[] = new byte[size];
+      byte[] bytes = new byte[size];
       random().nextBytes(bytes);
       io.writeBytes(bytes, bytes.length);
       io.close();
       IndexInput ii = mmapDir.openInput("bytes", newIOContext(random()));
-      byte actual[] = new byte[size]; // first read all bytes
+      byte[] actual = new byte[size]; // first read all bytes
       ii.readBytes(actual, 0, actual.length);
       assertEquals(new BytesRef(bytes), new BytesRef(actual));
       // reinit:
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
index 4c13ec2c6ca..004f5581738 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
@@ -25,7 +25,7 @@ public class TestBytesRef extends LuceneTestCase {
   }
 
   public void testFromBytes() {
-    byte bytes[] = new byte[] {(byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd'};
+    byte[] bytes = new byte[] {(byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd'};
     BytesRef b = new BytesRef(bytes);
     assertEquals(bytes, b.bytes);
     assertEquals(0, b.offset);
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
index 23aaa1719e7..673c7145871 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
@@ -23,8 +23,8 @@ public class TestCharsRef extends LuceneTestCase {
   @SuppressWarnings("deprecation")
   public void testUTF16InUTF8Order() {
     final int numStrings = atLeast(1000);
-    BytesRef utf8[] = new BytesRef[numStrings];
-    CharsRef utf16[] = new CharsRef[numStrings];
+    BytesRef[] utf8 = new BytesRef[numStrings];
+    CharsRef[] utf16 = new CharsRef[numStrings];
 
     for (int i = 0; i < numStrings; i++) {
       String s = TestUtil.randomUnicodeString(random());
@@ -90,7 +90,7 @@ public class TestCharsRef extends LuceneTestCase {
   // LUCENE-3590: fix off-by-one in subsequence, and fully obey interface
   // LUCENE-4671: fix subSequence
   public void testCharSequenceSubSequence() {
-    CharSequence sequences[] = {
+    CharSequence[] sequences = {
       new CharsRef("abc"),
       new CharsRef("0abc".toCharArray(), 1, 3),
       new CharsRef("abc0".toCharArray(), 0, 3),
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
index 0283778a608..8144657385a 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
@@ -25,7 +25,7 @@ public class TestIntsRef extends LuceneTestCase {
   }
 
   public void testFromInts() {
-    int ints[] = new int[] {1, 2, 3, 4};
+    int[] ints = new int[] {1, 2, 3, 4};
     IntsRef i = new IntsRef(ints, 0, 4);
     assertEquals(ints, i.ints);
     assertEquals(0, i.offset);
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java
index d8ed6daf63e..75437d8b6a4 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java
@@ -25,7 +25,7 @@ public class TestLongsRef extends LuceneTestCase {
   }
 
   public void testFromLongs() {
-    long longs[] = new long[] {1, 2, 3, 4};
+    long[] longs = new long[] {1, 2, 3, 4};
     LongsRef i = new LongsRef(longs, 0, 4);
     assertEquals(longs, i.longs);
     assertEquals(0, i.offset);
diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java
index 56097623f88..4e788dea73b 100644
--- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java
+++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java
@@ -57,7 +57,7 @@ public class TestDeterminizeLexicon extends LuceneTestCase {
     // TODO: very wasteful of RAM to do this without minimizing first.
     final ByteRunAutomaton lexByte = new ByteRunAutomaton(lex, false, 1000000);
     for (String s : terms) {
-      byte bytes[] = s.getBytes(StandardCharsets.UTF_8);
+      byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
       assertTrue(lexByte.run(bytes, 0, bytes.length));
     }
   }
diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java
index f8b7b463ccf..871109940b4 100644
--- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java
+++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java
@@ -64,8 +64,8 @@ public class TestLevenshteinAutomata extends LuceneTestCase {
   private void assertLev(String s, int maxDistance) {
     LevenshteinAutomata builder = new LevenshteinAutomata(s, false);
     LevenshteinAutomata tbuilder = new LevenshteinAutomata(s, true);
-    Automaton automata[] = new Automaton[maxDistance + 1];
-    Automaton tautomata[] = new Automaton[maxDistance + 1];
+    Automaton[] automata = new Automaton[maxDistance + 1];
+    Automaton[] tautomata = new Automaton[maxDistance + 1];
     for (int n = 0; n < automata.length; n++) {
       automata[n] = builder.toAutomaton(n);
       tautomata[n] = tbuilder.toAutomaton(n);
@@ -263,9 +263,9 @@ public class TestLevenshteinAutomata extends LuceneTestCase {
   private int getDistance(String target, String other) {
     char[] sa;
     int n;
-    int p[]; // 'previous' cost array, horizontally
-    int d[]; // cost array, horizontally
-    int _d[]; // placeholder to assist in swapping p and d
+    int[] p; // 'previous' cost array, horizontally
+    int[] d; // cost array, horizontally
+    int[] _d; // placeholder to assist in swapping p and d
 
     /*
       The difference between this impl. and the previous is that, rather
@@ -334,7 +334,7 @@ public class TestLevenshteinAutomata extends LuceneTestCase {
   private int getTDistance(String target, String other) {
     char[] sa;
     int n;
-    int d[][]; // cost array
+    int[][] d; // cost array
 
     sa = target.toCharArray();
     n = sa.length;
64 : DirectWriter.bitsRequired(1L << (bpv - 1)); String name = "bpv" + bpv + "_" + i; IndexOutput output = directory.createOutput(name, IOContext.DEFAULT); @@ -122,7 +122,7 @@ public class TestDirectPacked extends LuceneTestCase { private long[] randomLongs(MyRandom random, int bpv) { int amount = random.nextInt(5000); - long longs[] = new long[amount]; + long[] longs = new long[amount]; for (int i = 0; i < longs.length; i++) { longs[i] = random.nextLong(bpv); } @@ -131,7 +131,7 @@ public class TestDirectPacked extends LuceneTestCase { // java.util.Random only returns 48bits of randomness in nextLong... static class MyRandom extends Random { - byte buffer[] = new byte[8]; + byte[] buffer = new byte[8]; ByteArrayDataInput input = new ByteArrayDataInput(); MyRandom(long seed) { diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java index eca9ccfa092..3f33365b6b0 100644 --- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java +++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java @@ -100,7 +100,7 @@ public class TestExpressionSorts extends LuceneTestCase { void assertQuery(Query query) throws Exception { for (int i = 0; i < 10; i++) { boolean reversed = random().nextBoolean(); - SortField fields[] = + SortField[] fields = new SortField[] { new SortField("int", SortField.Type.INT, reversed), new SortField("long", SortField.Type.LONG, reversed), @@ -121,8 +121,8 @@ public class TestExpressionSorts extends LuceneTestCase { // make our actual sort, mutating original by replacing some of the // sortfields with equivalent expressions - SortField original[] = sort.getSort(); - SortField mutated[] = new SortField[original.length]; + SortField[] original = sort.getSort(); + SortField[] mutated = new SortField[original.length]; for (int i = 0; i < mutated.length; i++) { if (random().nextInt(3) > 0) { SortField s = original[i]; diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java index a0b03f1727b..c1ee5314e3f 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java @@ -51,12 +51,12 @@ public abstract class TaxonomyMergeUtils { // merge the taxonomies destTaxoWriter.addTaxonomy(srcTaxoDir, map); - int ordinalMap[] = map.getMap(); + int[] ordinalMap = map.getMap(); DirectoryReader reader = DirectoryReader.open(srcIndexDir); try { List leaves = reader.leaves(); int numReaders = leaves.size(); - CodecReader wrappedLeaves[] = new CodecReader[numReaders]; + CodecReader[] wrappedLeaves = new CodecReader[numReaders]; for (int i = 0; i < numReaders; i++) { wrappedLeaves[i] = SlowCodecReaderWrapper.wrap( diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java index 9f3ca9ef84e..4664b23a24e 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java @@ -790,7 +790,7 @@ public class TestTaxonomyCombined extends FacetTestCase { final AtomicBoolean stop = new AtomicBoolean(false); final Throwable[] error = new Throwable[] {null}; - final int retrieval[] = 
{0}; + final int[] retrieval = {0}; Thread thread = new Thread("Child Arrays Verifier") { diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java index b008691d868..58d0a3de78f 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java @@ -36,7 +36,7 @@ public class TestAddTaxonomy extends FacetTestCase { private void dotest(int ncats, final int range) throws Exception { final AtomicInteger numCats = new AtomicInteger(ncats); - Directory dirs[] = new Directory[2]; + Directory[] dirs = new Directory[2]; for (int i = 0; i < dirs.length; i++) { dirs[i] = newDirectory(); final DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[i]); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java index 9178f44703f..cf602ebb720 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java @@ -150,7 +150,7 @@ public class GradientFormatter implements Formatter { return Math.min(colorMin, colorMax) + (int) colScore; } - private static char hexDigits[] = { + private static char[] hexDigits = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java index 0809577386f..311eb18fa57 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java @@ -286,7 +286,7 @@ public class Highlighter { } // return the most relevant fragments - TextFragment frag[] = new TextFragment[fragQueue.size()]; + TextFragment[] frag = new TextFragment[fragQueue.size()]; for (int i = frag.length - 1; i >= 0; i--) { frag[i] = fragQueue.pop(); } @@ -396,7 +396,7 @@ public class Highlighter { public final String getBestFragments( TokenStream tokenStream, String text, int maxNumFragments, String separator) throws IOException, InvalidTokenOffsetsException { - String sections[] = getBestFragments(tokenStream, text, maxNumFragments); + String[] sections = getBestFragments(tokenStream, text, maxNumFragments); StringBuilder result = new StringBuilder(); for (int i = 0; i < sections.length; i++) { if (i > 0) { diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/DefaultPassageFormatter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/DefaultPassageFormatter.java index 49b88f8bffd..345e2b61316 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/DefaultPassageFormatter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/DefaultPassageFormatter.java @@ -56,7 +56,7 @@ public class DefaultPassageFormatter extends PassageFormatter { } @Override - public String format(Passage passages[], String content) { + public String format(Passage[] passages, String content) { StringBuilder sb = new StringBuilder(); int pos = 0; for (Passage passage : passages) { diff --git 
a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java index 562991a8e20..6fa281bb16c 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java @@ -44,10 +44,10 @@ public class Passage { assert startOffset >= this.startOffset && startOffset <= this.endOffset; if (numMatches == matchStarts.length) { int newLength = ArrayUtil.oversize(numMatches + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); - int newMatchStarts[] = new int[newLength]; - int newMatchEnds[] = new int[newLength]; - int newMatchTermFreqInDoc[] = new int[newLength]; - BytesRef newMatchTerms[] = new BytesRef[newLength]; + int[] newMatchStarts = new int[newLength]; + int[] newMatchEnds = new int[newLength]; + int[] newMatchTermFreqInDoc = new int[newLength]; + BytesRef[] newMatchTerms = new BytesRef[newLength]; System.arraycopy(matchStarts, 0, newMatchStarts, 0, numMatches); System.arraycopy(matchEnds, 0, newMatchEnds, 0, numMatches); System.arraycopy(matchTerms, 0, newMatchTerms, 0, numMatches); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PassageFormatter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PassageFormatter.java index 135002b5d78..edc5a2624bd 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PassageFormatter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PassageFormatter.java @@ -34,5 +34,5 @@ public abstract class PassageFormatter { * that return String, the toString method on the Object returned by this method is used to * compute the string. */ - public abstract Object format(Passage passages[], String content); + public abstract Object format(Passage[] passages, String content); } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TokenStreamOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TokenStreamOffsetStrategy.java index b99a56915cd..05a9743a8cb 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TokenStreamOffsetStrategy.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TokenStreamOffsetStrategy.java @@ -70,7 +70,7 @@ public class TokenStreamOffsetStrategy extends AnalysisOffsetStrategy { int currentMatch = -1; - final BytesRef matchDescriptions[]; + final BytesRef[] matchDescriptions; TokenStreamOffsetsEnum(TokenStream ts, CharArrayMatcher[] matchers) throws IOException { this.stream = ts; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java index c71d1864075..bcbfc4ad827 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java @@ -442,7 +442,7 @@ public class UnifiedHighlighter { */ public Map highlightFields(String[] fields, Query query, TopDocs topDocs) throws IOException { - int maxPassages[] = new int[fields.length]; + int[] maxPassages = new int[fields.length]; Arrays.fill(maxPassages, 1); return highlightFields(fields, query, topDocs, maxPassages); } @@ -474,8 +474,8 @@ public class UnifiedHighlighter { */ public Map highlightFields( String[] fields, Query query, TopDocs topDocs, int[] 
maxPassages) throws IOException { - final ScoreDoc scoreDocs[] = topDocs.scoreDocs; - int docids[] = new int[scoreDocs.length]; + final ScoreDoc[] scoreDocs = topDocs.scoreDocs; + int[] docids = new int[scoreDocs.length]; for (int i = 0; i < docids.length; i++) { docids[i] = scoreDocs[i].doc; } @@ -556,8 +556,8 @@ public class UnifiedHighlighter { copyAndSortDocIdsWithIndex(docIdsIn, docIds, docInIndexes); // latter 2 are "out" params // Sort fields w/ maxPassages pair: (copy input arrays since we sort in-place) - final String fields[] = new String[fieldsIn.length]; - final int maxPassages[] = new int[maxPassagesIn.length]; + final String[] fields = new String[fieldsIn.length]; + final int[] maxPassages = new int[maxPassagesIn.length]; copyAndSortFieldsWithMaxPassages( fieldsIn, maxPassagesIn, fields, maxPassages); // latter 2 are "out" params diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java index c1e353d3bea..52e5c1bbb8e 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java @@ -1187,7 +1187,7 @@ public class TestHighlighter extends BaseTokenStreamTestCase implements Formatte @Override public void run() throws Exception { numHighlights = 0; - SpanQuery clauses[] = { + SpanQuery[] clauses = { new SpanTermQuery(new Term("contents", "john")), new SpanTermQuery(new Term("contents", "kennedy")), }; @@ -1230,7 +1230,7 @@ public class TestHighlighter extends BaseTokenStreamTestCase implements Formatte @Override public void run() throws Exception { numHighlights = 0; - SpanQuery clauses[] = { + SpanQuery[] clauses = { new SpanTermQuery(new Term("contents", "john")), new SpanTermQuery(new Term("contents", "kennedy")), }; @@ -1534,10 +1534,10 @@ public class TestHighlighter extends BaseTokenStreamTestCase implements Formatte query, FIELD_NAME, TestHighlighter.this); // new Highlighter(this, new // QueryTermScorer(query)); highlighter.setTextFragmenter(new SimpleFragmenter(20)); - String stringResults[] = highlighter.getBestFragments(tokenStream, text, 10); + String[] stringResults = highlighter.getBestFragments(tokenStream, text, 10); tokenStream = analyzer.tokenStream(FIELD_NAME, text); - TextFragment fragmentResults[] = + TextFragment[] fragmentResults = highlighter.getBestTextFragments(tokenStream, text, true, 10); assertTrue( diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java index f91f040c783..1f23c3f29f3 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java @@ -159,7 +159,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); assertEquals("Highlighting the first term. 
", snippets[1]); @@ -223,7 +223,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(maxLength); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); ir.close(); return snippets; @@ -248,7 +248,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals("This is a test", snippets[0]); @@ -276,7 +276,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -306,7 +306,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "field")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 10); + String[] snippets = highlighter.highlight("body", query, topDocs, 10); assertEquals(1, snippets.length); String highlightedValue = "This is a multivalued field. Sentencetwo field."; assertEquals(highlightedValue + "... " + highlightedValue, snippets[0]); @@ -379,7 +379,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); assertEquals("Highlighting the first term. ", snippets[1]); @@ -408,7 +408,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(2, snippets.length); assertEquals( "This is a test. Just a test highlighting from postings. 
", snippets[0]); @@ -448,7 +448,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setHighlightPhrasesStrictly(false); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); if (highlighter .getFlags("body") @@ -484,7 +484,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setHighlightPhrasesStrictly(false); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertFalse(snippets[0].contains("CuriousCurious")); ir.close(); @@ -515,7 +515,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(Integer.MAX_VALUE - 1); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertTrue(snippets[0].contains("Square")); assertTrue(snippets[0].contains("Porter")); @@ -541,7 +541,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. ... Feel free to test test test test test test test.", @@ -578,7 +578,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(Integer.MAX_VALUE - 1); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertFalse(snippets[0].contains("both")); ir.close(); @@ -610,7 +610,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. Just highlighting from postings. This is also a much sillier test. 
Feel free to test test test test test test test.", @@ -644,7 +644,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { int[] docIDs = new int[2]; docIDs[0] = hits[0].doc; docIDs[1] = hits[1].doc; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {1}) .get("body"); @@ -692,7 +692,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.", @@ -722,7 +722,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -755,7 +755,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -788,7 +788,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { highlighter.setMaxNoHighlightPassages(0); // don't want any default summary Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -825,7 +825,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { }; Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -857,7 +857,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); Query query = new TermQuery(new Term("bogus", "highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"bogus"}, query, docIDs, new int[] {2}) .get("bogus"); @@ -889,7 +889,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -921,7 +921,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -961,7 +961,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { TopDocs hits = searcher.search(query, numDocs); 
assertEquals(numDocs, hits.totalHits.value); - String snippets[] = highlighter.highlight("body", query, hits); + String[] snippets = highlighter.highlight("body", query, hits); assertEquals(numDocs, snippets.length); for (int hit = 0; hit < numDocs; hit++) { Document doc = searcher.doc(hits.scoreDocs[hit].doc); @@ -1037,7 +1037,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals( "Just a test highlighting from <i>postings</i>. ", snippets[0]); @@ -1069,7 +1069,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { PassageFormatter defaultFormatter = new DefaultPassageFormatter(); @Override - public String[] format(Passage passages[], String content) { + public String[] format(Passage[] passages, String content) { // Just turns the String snippet into a length 2 // array of String return new String[] { @@ -1519,7 +1519,7 @@ public class TestUnifiedHighlighter extends LuceneTestCase { Query query = new TermQuery(new Term("body", "highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java index b12ef3a9ee3..4ddf2382bdb 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java @@ -121,7 +121,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new WildcardQuery(new Term("body", "te*")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -177,7 +177,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new BoostQuery(new PrefixQuery(new Term("body", "te")), 2.0f); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -219,7 +219,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new RegexpQuery(new Term("body", "te.*")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = 
highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -261,7 +261,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new FuzzyQuery(new Term("body", "tets"), 1); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -321,7 +321,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = TermRangeQuery.newStringRange("body", "ta", "tf", true, true); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -434,7 +434,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -479,7 +479,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -507,7 +507,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { ConstantScoreQuery query = new ConstantScoreQuery(new WildcardQuery(new Term("body", "te*"))); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -537,7 +537,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Collections.singleton(new WildcardQuery(new Term("body", "te*"))), 0); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -565,7 +565,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); 
assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -595,7 +595,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new SpanOrQuery(new SpanQuery[] {childQuery}); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -625,7 +625,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new SpanNearQuery(new SpanQuery[] {childQuery, childQuery}, 0, false); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -655,7 +655,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new SpanNotQuery(include, exclude); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -685,7 +685,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new SpanFirstQuery(childQuery, 1000000); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -720,7 +720,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); // Default formatter just bolds each hit: @@ -736,7 +736,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { return new PassageFormatter() { @Override - public Object format(Passage passages[], String content) { + public Object format(Passage[] passages, String content) { // Copied from DefaultPassageFormatter, but // tweaked to include the matched term: StringBuilder sb = new StringBuilder(); @@ -813,7 +813,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { .add(new PrefixQuery(new Term("body", "bra")), BooleanClause.Occur.MUST) .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); - String snippets[] = + String[] snippets = highlighter.highlight("body", query, topDocs, 2); // ask 
for 2 but we'll only get 1 assertArrayEquals(new String[] {"Alpha Bravo foo foo foo. "}, snippets); @@ -847,7 +847,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { .add(new PrefixQuery(new Term("body", "bra")), BooleanClause.Occur.MUST) .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); - String snippets[] = + String[] snippets = highlighter.highlight("body", query, topDocs, 2); // ask for 2 but we'll only get 1 assertArrayEquals( new String[] {"Alpha Bravo Bravado foo foo foo."}, snippets); @@ -951,7 +951,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new PrefixQuery(new Term("body", "nonexistent")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -983,7 +983,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { Query query = new PrefixQuery(new Term("body", "ab")); TopDocs topDocs = searcher.search(query, 10); - String snippets[] = + String[] snippets = highlighter.highlightFields(new String[] {"body"}, query, topDocs).get("body"); Arrays.sort(snippets); assertEquals("[word aberration, word absolve]", Arrays.toString(snippets)); @@ -1019,7 +1019,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { int[] docIds = new int[] {docID}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIds, new int[] {2}) .get("body"); @@ -1056,7 +1056,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase { int[] docIds = new int[] {docId}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIds, new int[] {2}) .get("body"); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterRanking.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterRanking.java index cd85c6c9a9c..2854c96a523 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterRanking.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterRanking.java @@ -176,7 +176,7 @@ public class TestUnifiedHighlighterRanking extends LuceneTestCase { HashSet seen = new HashSet<>(); @Override - public String format(Passage passages[], String content) { + public String format(Passage[] passages, String content) { for (Passage p : passages) { // verify some basics about the passage assertTrue(p.getScore() >= 0); @@ -300,7 +300,7 @@ public class TestUnifiedHighlighterRanking extends LuceneTestCase { Query query = new TermQuery(new Term("body", "test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 1); + String[] snippets = highlighter.highlight("body", query, topDocs, 1); assertEquals(1, snippets.length); assertTrue(snippets[0].startsWith("This test is a better test")); @@ -356,7 +356,7 @@ public class TestUnifiedHighlighterRanking extends LuceneTestCase { .build(); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 1); + String[] snippets = highlighter.highlight("body", query, topDocs, 1); assertEquals(1, snippets.length); assertTrue(snippets[0].startsWith("On the other hand")); diff --git 
a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java index 5817e01c59e..8e602b16d75 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java @@ -154,7 +154,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); assertEquals("Highlighting the first term. ", snippets[1]); @@ -217,7 +217,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(maxLength); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); ir.close(); return snippets; @@ -242,7 +242,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals("This is a test", snippets[0]); @@ -270,7 +270,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("This is a test.", snippets[0]); assertEquals("Test a one sentence document.", snippets[1]); @@ -299,7 +299,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("field")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 10); + String[] snippets = highlighter.highlight("body", query, topDocs, 10); assertEquals(1, snippets.length); String highlightedValue = "This is a multivalued field. Sentencetwo field."; assertEquals(highlightedValue + "... 
" + highlightedValue, snippets[0]); @@ -331,7 +331,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Intervals.term("highlighting"), Intervals.term("just"), Intervals.term("first"))); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(2, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); assertEquals("Highlighting the first term. ", snippets[1]); @@ -359,7 +359,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(2, snippets.length); assertEquals( "This is a test. Just a test highlighting from postings. ", snippets[0]); @@ -394,7 +394,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setHighlightPhrasesStrictly(false); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); // highlighter.getFlags("body").containsAll(EnumSet.of(HighlightFlag.WEIGHT_MATCHES, // HighlightFlag.PHRASES))) { @@ -423,7 +423,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setHighlightPhrasesStrictly(false); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertFalse(snippets[0].contains("CuriousCurious")); int matches = 0; @@ -462,7 +462,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(Integer.MAX_VALUE - 1); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertTrue(snippets[0].contains("Square")); assertTrue(snippets[0].contains("Porter")); @@ -490,7 +490,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. ... 
Feel free to test test test test test test test.", @@ -518,7 +518,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { assertEquals(1, topDocs.totalHits.value); UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); highlighter.setMaxLength(Integer.MAX_VALUE - 1); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertFalse(snippets[0].contains("both")); assertTrue(snippets[0].contains("terms")); @@ -551,7 +551,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.", @@ -584,7 +584,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { int[] docIDs = new int[2]; docIDs[0] = hits[0].doc; docIDs[1] = hits[1].doc; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {1}) .get("body"); @@ -631,7 +631,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("test")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs, 2); + String[] snippets = highlighter.highlight("body", query, topDocs, 2); assertEquals(1, snippets.length); assertEquals( "This is a test. Just highlighting from postings. This is also a much sillier test. 
Feel free to test test test test test test test.", @@ -660,7 +660,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -691,7 +691,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -723,7 +723,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { highlighter.setMaxNoHighlightPassages(0); // don't want any default summary Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -759,7 +759,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { }; Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -790,7 +790,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer); Query query = new IntervalQuery("bogus", Intervals.term("highlighting")); int[] docIDs = new int[] {0}; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"bogus"}, query, docIDs, new int[] {2}) .get("bogus"); @@ -821,7 +821,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -852,7 +852,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); int[] docIDs = new int[1]; docIDs[0] = docID; - String snippets[] = + String[] snippets = highlighter .highlightFields(new String[] {"body"}, query, docIDs, new int[] {2}) .get("body"); @@ -891,7 +891,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { TopDocs hits = searcher.search(query, numDocs); assertEquals(numDocs, hits.totalHits.value); - String snippets[] = highlighter.highlight("body", query, hits); + String[] snippets = highlighter.highlight("body", query, hits); assertEquals(numDocs, snippets.length); for (int hit = 0; hit < numDocs; hit++) { Document doc = searcher.doc(hits.scoreDocs[hit].doc); @@ -931,7 +931,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = 
highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals( "Just a test highlighting from <i>postings</i>. ", snippets[0]); @@ -962,7 +962,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { PassageFormatter defaultFormatter = new DefaultPassageFormatter(); @Override - public String[] format(Passage passages[], String content) { + public String[] format(Passage[] passages, String content) { // Just turns the String snippet into a length 2 // array of String return new String[] { @@ -1076,7 +1076,7 @@ public class TestUnifiedHighlighterTermIntervals extends LuceneTestCase { Query query = new IntervalQuery("body", Intervals.term("highlighting")); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(1, topDocs.totalHits.value); - String snippets[] = highlighter.highlight("body", query, topDocs); + String[] snippets = highlighter.highlight("body", query, topDocs); assertEquals(1, snippets.length); assertEquals("Just a test highlighting from postings. ", snippets[0]); diff --git a/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java b/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java index 50b93175d6b..0465f4e96b9 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java @@ -47,11 +47,11 @@ public class IndexMergeTool { @SuppressForbidden(reason = "System.err required (verbose mode): command line tool") static class Options { String mergedIndexPath; - String indexPaths[]; + String[] indexPaths; IndexWriterConfig config = new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE); int maxSegments = 0; - static Options parse(String args[]) throws ReflectiveOperationException { + static Options parse(String[] args) throws ReflectiveOperationException { Options options = new Options(); int index = 0; while (index < args.length) { diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java b/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java index f1b14a746db..c41401af3c9 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java @@ -136,7 +136,7 @@ public class WindowsDirectory extends FSDirectory { private static native long open(String filename) throws IOException; /** Reads data from a file at pos into bytes */ - private static native int read(long fd, byte bytes[], int offset, int length, long pos) + private static native int read(long fd, byte[] bytes, int offset, int length, long pos) throws IOException; /** Closes a handle to a file */ diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java index e0705463c91..72aeaaa271c 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java @@ -419,7 +419,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { parsedRecords.clear(); for (int i = 0; i < hitsOfThe60s.length; i++) { - String cols[] = hitsOfThe60s[i].split("\t"); + String[] cols = hitsOfThe60s[i].split("\t"); Record record = new Record(String.valueOf(i), cols[0], cols[1], cols[2], Float.parseFloat(cols[3])); parsedRecords.put(record.id, record); diff --git 
a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java index 9665e4cd001..068923d9af4 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java @@ -90,7 +90,7 @@ public abstract class FunctionTestSetup extends LuceneTestCase { protected ValueSource FLOAT_MV_MAX_VALUESOURCE = new MultiValuedFloatFieldSource(FLOAT_FIELD_MV_MAX, SortedNumericSelector.Type.MAX); - private static final String DOC_TEXT_LINES[] = { + private static final String[] DOC_TEXT_LINES = { "Well, this is just some plain text we use for creating the ", "test documents. It used to be a text from an online collection ", "devoted to first aid, but if there was there an (online) lawyers ", @@ -132,7 +132,7 @@ public abstract class FunctionTestSetup extends LuceneTestCase { RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); // add docs not exactly in natural ID order, to verify we do check the order of docs by scores int remaining = N_DOCS; - boolean done[] = new boolean[N_DOCS]; + boolean[] done = new boolean[N_DOCS]; int i = 0; while (remaining > 0) { if (done[i]) { diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java index a5502c7bdf6..d1984e116cc 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java @@ -123,7 +123,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup { IndexSearcher s = newSearcher(r); TopDocs td = s.search(functionQuery, 1000); assertEquals("All docs should be matched!", N_DOCS, td.totalHits.value); - ScoreDoc sd[] = td.scoreDocs; + ScoreDoc[] sd = td.scoreDocs; for (ScoreDoc aSd : sd) { float score = aSd.score; log(s.explain(functionQuery, aSd.doc)); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java index 737ab7053f0..0c1666cb978 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java @@ -150,7 +150,7 @@ public class TestFunctionScoreQuery extends FunctionTestSetup { QueryUtils.check(random(), q, searcher, rarely()); - int expectedDocs[] = new int[] {4, 7, 9}; + int[] expectedDocs = new int[] {4, 7, 9}; TopDocs docs = searcher.search(q, 4); assertEquals(expectedDocs.length, docs.totalHits.value); for (int i = 0; i < expectedDocs.length; i++) { diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java index 203d8d2f4f4..fa58d12d6c5 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java @@ -201,10 +201,10 @@ public class TestIndexReaderFunctions extends LuceneTestCase { assertEquals(expected, w.isCacheable(ctx)); } - void assertHits(DoubleValuesSource vs, float scores[]) throws Exception { + void assertHits(DoubleValuesSource vs, float[] scores) 
throws Exception { Query q = new FunctionScoreQuery(new MatchAllDocsQuery(), vs); - ScoreDoc expected[] = new ScoreDoc[scores.length]; - int expectedDocs[] = new int[scores.length]; + ScoreDoc[] expected = new ScoreDoc[scores.length]; + int[] expectedDocs = new int[scores.length]; for (int i = 0; i < expected.length; i++) { expectedDocs[i] = i; expected[i] = new ScoreDoc(i, scores[i]); @@ -218,7 +218,7 @@ public class TestIndexReaderFunctions extends LuceneTestCase { assertSort(vs, expected); } - void assertSort(DoubleValuesSource vs, ScoreDoc expected[]) throws Exception { + void assertSort(DoubleValuesSource vs, ScoreDoc[] expected) throws Exception { boolean reversed = random().nextBoolean(); Arrays.sort( expected, (a, b) -> reversed ? (int) (b.score - a.score) : (int) (a.score - b.score)); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java index c8998f84a3e..24f33770bdb 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java @@ -92,9 +92,9 @@ public class TestLongNormValueSource extends LuceneTestCase { } } - void assertHits(Query q, float scores[]) throws Exception { - ScoreDoc expected[] = new ScoreDoc[scores.length]; - int expectedDocs[] = new int[scores.length]; + void assertHits(Query q, float[] scores) throws Exception { + ScoreDoc[] expected = new ScoreDoc[scores.length]; + int[] expectedDocs = new int[scores.length]; for (int i = 0; i < expected.length; i++) { expectedDocs[i] = i; expected[i] = new ScoreDoc(i, scores[i]); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java index 5d4fc12a824..8f343c78e56 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java @@ -738,9 +738,9 @@ public class TestValueSources extends LuceneTestCase { } } - void assertHits(Query q, float scores[]) throws Exception { - ScoreDoc expected[] = new ScoreDoc[scores.length]; - int expectedDocs[] = new int[scores.length]; + void assertHits(Query q, float[] scores) throws Exception { + ScoreDoc[] expected = new ScoreDoc[scores.length]; + int[] expectedDocs = new int[scores.length]; for (int i = 0; i < expected.length; i++) { expectedDocs[i] = i; expected[i] = new ScoreDoc(i, scores[i]); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java index 3575ed4a5af..abee3d06a4c 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java @@ -60,7 +60,7 @@ public class TestIntervals extends LuceneTestCase { // 0 1 2 3 4 5 6 7 8 9 // // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 - private static String field1_docs[] = { + private static String[] field1_docs = { "Nothing of interest to anyone here", "Pease porridge hot, pease porridge cold, pease porridge in the pot nine days old. 
Some like it hot, some like it cold, some like it in the pot nine days old", "Pease porridge cold, pease porridge hot, pease porridge in the pot twelve days old. Some like it cold, some like it hot, some like it in the fraggle", @@ -72,7 +72,7 @@ public class TestIntervals extends LuceneTestCase { // 0 1 2 3 4 5 6 7 8 9 // // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 - private static String field2_docs[] = { + private static String[] field2_docs = { "In Xanadu did Kubla Khan a stately pleasure dome decree", "Where Alph the sacred river ran through caverns measureless to man", "a b a c b a b c", diff --git a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadExplanations.java b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadExplanations.java index 03e80d3fb16..6abd0d171f2 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadExplanations.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadExplanations.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.BaseExplanationTestCase; /** TestExplanations subclass focusing on payload queries */ public class TestPayloadExplanations extends BaseExplanationTestCase { - private static PayloadFunction functions[] = + private static PayloadFunction[] functions = new PayloadFunction[] { new AveragePayloadFunction(), new MinPayloadFunction(), new MaxPayloadFunction(), }; diff --git a/lucene/queries/src/test/org/apache/lucene/queries/spans/TestSpanSearchEquivalence.java b/lucene/queries/src/test/org/apache/lucene/queries/spans/TestSpanSearchEquivalence.java index 3d4f84bc787..e2a6d319144 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/spans/TestSpanSearchEquivalence.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/spans/TestSpanSearchEquivalence.java @@ -178,7 +178,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase { public void testSpanNearVersusPhrase() throws Exception { Term t1 = randomTerm(); Term t2 = randomTerm(); - SpanQuery subquery[] = + SpanQuery[] subquery = new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))}; SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, 0, true)); PhraseQuery q2 = new PhraseQuery(t1.field(), t1.bytes(), t2.bytes()); @@ -193,7 +193,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase { public void testSpanNearVersusBooleanAnd() throws Exception { Term t1 = randomTerm(); Term t2 = randomTerm(); - SpanQuery subquery[] = + SpanQuery[] subquery = new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))}; SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, Integer.MAX_VALUE, false)); BooleanQuery.Builder q2 = new BooleanQuery.Builder(); @@ -206,7 +206,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase { public void testSpanNearVersusSloppySpanNear() throws Exception { Term t1 = randomTerm(); Term t2 = randomTerm(); - SpanQuery subquery[] = + SpanQuery[] subquery = new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))}; SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, 0, false)); SpanQuery q2 = spanQuery(new SpanNearQuery(subquery, 1, false)); @@ -217,7 +217,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase { public void testSpanNearInOrderVersusOutOfOrder() throws Exception { Term t1 = randomTerm(); Term t2 = randomTerm(); 
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, 3, true));
     SpanQuery q2 = spanQuery(new SpanNearQuery(subquery, 3, false));
@@ -228,7 +228,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanNearIncreasingSloppiness() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     for (int i = 0; i < 10; i++) {
       SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, i, false));
@@ -242,7 +242,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
     Term t3 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {
           spanQuery(new SpanTermQuery(t1)),
           spanQuery(new SpanTermQuery(t2)),
@@ -259,7 +259,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanNearIncreasingOrderedSloppiness() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     for (int i = 0; i < 10; i++) {
       SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, i, false));
@@ -273,7 +273,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
     Term t3 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {
           spanQuery(new SpanTermQuery(t1)),
           spanQuery(new SpanTermQuery(t2)),
@@ -327,7 +327,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanRangeNear() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     for (int i = 0; i < 5; i++) {
@@ -343,7 +343,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanRangeNearIncreasingEnd() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     for (int i = 0; i < 5; i++) {
@@ -359,7 +359,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanRangeNearEverything() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     Query q1 = spanQuery(new SpanPositionRangeQuery(nearQuery, 0, Integer.MAX_VALUE));
@@ -399,7 +399,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanFirstNear() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     for (int i = 0; i < 10; i++) {
@@ -413,7 +413,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanFirstNearIncreasing() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     for (int i = 0; i < 10; i++) {
@@ -427,7 +427,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanFirstNearEverything() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
     Query q1 = spanQuery(new SpanFirstQuery(nearQuery, Integer.MAX_VALUE));
@@ -439,7 +439,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanWithinVsNear() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
 
@@ -453,7 +453,7 @@ public class TestSpanSearchEquivalence extends SearchEquivalenceTestBase {
   public void testSpanWithinVsContaining() throws Exception {
     Term t1 = randomTerm();
     Term t2 = randomTerm();
-    SpanQuery subquery[] =
+    SpanQuery[] subquery =
         new SpanQuery[] {spanQuery(new SpanTermQuery(t1)), spanQuery(new SpanTermQuery(t2))};
     SpanQuery nearQuery = spanQuery(new SpanNearQuery(subquery, 10, true));
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java
index a6b5e489852..5d0540012ab 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java
@@ -152,8 +152,8 @@ public class SimpleQueryParser extends QueryBuilder {
       return new MatchAllDocsQuery();
     }
 
-    char data[] = queryText.toCharArray();
-    char buffer[] = new char[data.length];
+    char[] data = queryText.toCharArray();
+    char[] buffer = new char[data.length];
 
     State state = new State(data, buffer, 0, data.length);
     parseSubQuery(state);
@@ -485,7 +485,7 @@ public class SimpleQueryParser extends QueryBuilder {
    * @return slop/edit distance, 0 in the case of non-parsing slop/edit string
    */
   private int parseFuzziness(State state) {
-    char slopText[] = new char[state.length];
+    char[] slopText = new char[state.length];
     int slopLength = 0;
 
     if (state.data[state.index] == '~') {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java
index cb7c8ad12ba..f70e6626f48 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java
@@ -47,7 +47,7 @@ public class CorePlusQueriesParser extends CoreParser {
 
   protected CorePlusQueriesParser(String defaultField, Analyzer analyzer, QueryParser parser) {
     super(defaultField, analyzer, parser);
-    String fields[] = {"contents"};
+    String[] fields = {"contents"};
     queryFactory.addBuilder("LikeThisQuery", new LikeThisQueryBuilder(analyzer, fields));
   }
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
index de45d724cbd..1c7ce8f3cb4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
@@ -39,7 +39,7 @@ public class LikeThisQueryBuilder implements QueryBuilder {
       30; // default is a 3rd of selected terms must match
 
   private final Analyzer analyzer;
-  private final String defaultFieldNames[];
+  private final String[] defaultFieldNames;
 
   public LikeThisQueryBuilder(Analyzer analyzer, String[] defaultFieldNames) {
     this.analyzer = analyzer;
@@ -52,7 +52,7 @@ public class LikeThisQueryBuilder implements QueryBuilder {
 
   @Override
   public Query getQuery(Element e) throws ParserException {
     String fieldsList = e.getAttribute("fieldNames"); // a comma-delimited list of fields
-    String fields[] = defaultFieldNames;
+    String[] fields = defaultFieldNames;
     if ((fieldsList != null) && (fieldsList.trim().length() > 0)) {
       fields = fieldsList.trim().split(","); // trim the fieldnames
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
index 367b5c2e818..4aca1f0c756 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
@@ -62,7 +62,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   // verify parsing of query using a stopping analyzer
   private void assertStopQueryEquals(String qtxt, String expectedRes) throws Exception {
     String[] fields = {"b", "t"};
-    Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
+    Occur[] occur = {Occur.SHOULD, Occur.SHOULD};
     TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
     MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, a);
 
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
index 54ba7083a49..b78a0599e34 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
@@ -918,7 +918,7 @@ public class TestQueryParser extends QueryParserTestBase {
       @Override
       public boolean incrementToken() throws IOException {
         if (input.incrementToken()) {
-          char term[] = termAtt.buffer();
+          char[] term = termAtt.buffer();
           for (int i = 0; i < term.length; i++)
             switch (term[i]) {
               case 'ü':
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
index 6ad454d5dae..01768bd7293 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
@@ -41,7 +41,7 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestComplexPhraseQuery extends LuceneTestCase {
   Directory rd;
   Analyzer analyzer;
-  DocData docsContent[] = {
+  DocData[] docsContent = {
     new DocData("john smith", "1", "developer"),
     new DocData("johathon smith", "2", "developer"),
    new DocData("john percival smith", "3", "designer"),
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
index cbb4affde86..022e5a2ffc0 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
@@ -75,7 +75,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
   // verify parsing of query using a stopping analyzer
   private void assertStopQueryEquals(String qtxt, String expectedRes) throws Exception {
     String[] fields = {"b", "t"};
-    Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
+    Occur[] occur = {Occur.SHOULD, Occur.SHOULD};
     TestQPHelper.QPTestAnalyzer a = new TestQPHelper.QPTestAnalyzer();
     StandardQueryParser mfqp = new StandardQueryParser();
     mfqp.setMultiFields(fields);
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index dca7f39656a..81de1f127c7 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -1229,12 +1229,12 @@ public class TestQPHelper extends LuceneTestCase {
     String qtxt =
         "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
     // 0 2 5 7 8
-    int expectedPositions[] = {1, 3, 4, 6, 9};
+    int[] expectedPositions = {1, 3, 4, 6, 9};
     PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a");
     //   System.out.println("Query text: "+qtxt);
     //   System.out.println("Result: "+pq);
-    Term t[] = pq.getTerms();
-    int pos[] = pq.getPositions();
+    Term[] t = pq.getTerms();
+    int[] pos = pq.getPositions();
     for (int i = 0; i < t.length; i++) {
       //   System.out.println(i+". "+t[i]+" pos: "+pos[i]);
       assertEquals(
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java
index 11185a5fa40..5ae1f3483e9 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java
@@ -617,7 +617,7 @@ public class TestSimpleQueryParser extends LuceneTestCase {
   }
 
   public void testRandomQueries2() throws Exception {
-    char chars[] = new char[] {'a', '1', '|', '&', ' ', '(', ')', '"', '-', '~'};
+    char[] chars = new char[] {'a', '1', '|', '&', ' ', '(', ')', '"', '-', '~'};
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < 1000; i++) {
       sb.setLength(0);
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
index 0b968b88242..3977c6523a4 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
@@ -22,7 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
 
 public class Test01Exceptions extends LuceneTestCase {
   /** Main for running test case by itself. */
-  public static void main(String args[]) {
+  public static void main(String[] args) {
     TestRunner.run(new TestSuite(Test01Exceptions.class));
   }
 
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
index 7d55b79d706..85443c7ced5 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
@@ -21,7 +21,7 @@ import junit.textui.TestRunner;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class Test02Boolean extends LuceneTestCase {
-  public static void main(String args[]) {
+  public static void main(String[] args) {
     TestRunner.run(new TestSuite(Test02Boolean.class));
   }
 
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
index 36554ec2fa4..d8ed0ec6b42 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
@@ -21,7 +21,7 @@ import junit.textui.TestRunner;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class Test03Distance extends LuceneTestCase {
-  public static void main(String args[]) {
+  public static void main(String[] args) {
     TestRunner.run(new TestSuite(Test03Distance.class));
   }
 
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
index 789b8aaa00d..1d79f176b9d 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
@@ -1116,12 +1116,12 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     qp.setEnablePositionIncrements(true);
     String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
     // 0 2 5 7 8
-    int expectedPositions[] = {1, 3, 4, 6, 9};
+    int[] expectedPositions = {1, 3, 4, 6, 9};
     PhraseQuery pq = (PhraseQuery) getQuery(qtxt, qp);
     //   System.out.println("Query text: "+qtxt);
     //   System.out.println("Result: "+pq);
-    Term t[] = pq.getTerms();
-    int pos[] = pq.getPositions();
+    Term[] t = pq.getTerms();
+    int[] pos = pq.getPositions();
     for (int i = 0; i < t.length; i++) {
       //   System.out.println(i+". "+t[i]+" pos: "+pos[i]);
       assertEquals(
@@ -1270,7 +1270,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random()));
     qp.setAllowLeadingWildcard(true);
 
-    String prefixQueries[][] = {
+    String[][] prefixQueries = {
       {
         "a*", "ab*", "abc*",
       },
@@ -1278,7 +1278,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
       {"o*", "op*", "opq*", "\\\\\\\\*"},
     };
 
-    String wildcardQueries[][] = {
+    String[][] wildcardQueries = {
      {"*a*", "*ab*", "*abc**", "ab*e*", "*g?", "*f?1", "abc**"},
      {"*h*", "*hi*", "*hij**", "hi*k*", "*n?", "*m?1", "hij**"},
      {"*o*", "*op*", "*opq**", "op*q*", "*u?", "*t?1", "opq**"},
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java
index 886cd1e5c2f..615649f664d 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java
@@ -149,12 +149,12 @@ public class BigIntegerPoint extends Field {
   // public helper methods (e.g. for queries)
 
   /** Encode single BigInteger dimension */
-  public static void encodeDimension(BigInteger value, byte dest[], int offset) {
+  public static void encodeDimension(BigInteger value, byte[] dest, int offset) {
     NumericUtils.bigIntToSortableBytes(value, BYTES, dest, offset);
   }
 
   /** Decode single BigInteger dimension */
-  public static BigInteger decodeDimension(byte value[], int offset) {
+  public static BigInteger decodeDimension(byte[] value, int offset) {
     return NumericUtils.sortableBytesToBigInt(value, offset, BYTES);
   }
 
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/HalfFloatPoint.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/HalfFloatPoint.java
index 493838f421b..9e031f55118 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/HalfFloatPoint.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/HalfFloatPoint.java
@@ -297,12 +297,12 @@ public final class HalfFloatPoint extends Field {
   // public helper methods (e.g. for queries)
 
   /** Encode single float dimension */
-  public static void encodeDimension(float value, byte dest[], int offset) {
+  public static void encodeDimension(float value, byte[] dest, int offset) {
     shortToSortableBytes(halfFloatToSortableShort(value), dest, offset);
   }
 
   /** Decode single float dimension */
-  public static float decodeDimension(byte value[], int offset) {
+  public static float decodeDimension(byte[] value, int offset) {
     return sortableShortToHalfFloat(sortableBytesToShort(value, offset));
   }
 
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CombinedFieldQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CombinedFieldQuery.java
index c4e00eabdcf..d3187a0896e 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CombinedFieldQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CombinedFieldQuery.java
@@ -171,9 +171,9 @@ public final class CombinedFieldQuery extends Query implements Accountable {
   // sorted map for fields.
   private final TreeMap fieldAndWeights;
   // array of terms, sorted.
-  private final BytesRef terms[];
+  private final BytesRef[] terms;
   // array of terms per field, sorted
-  private final Term fieldTerms[];
+  private final Term[] fieldTerms;
 
   private final long ramBytesUsed;
 
@@ -317,7 +317,7 @@ public final class CombinedFieldQuery extends Query implements Accountable {
 
   class CombinedFieldWeight extends Weight {
     private final IndexSearcher searcher;
-    private final TermStates termStates[];
+    private final TermStates[] termStates;
     private final Similarity.SimScorer simWeight;
 
     CombinedFieldWeight(Query query, IndexSearcher searcher, ScoreMode scoreMode, float boost)
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java
index 295fb7cc801..307f740084b 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java
@@ -215,7 +215,7 @@ public abstract class StrategyTestCase extends SpatialTestCase {
   }
 
   /** scores[] are in docId order */
-  protected void checkValueSource(DoubleValuesSource vs, float scores[], float delta)
+  protected void checkValueSource(DoubleValuesSource vs, float[] scores, float delta)
       throws IOException {
 
     for (LeafReaderContext ctx : indexSearcher.getTopReaderContext().leaves()) {
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java
index 90643ed6b0c..7d7eaff65cf 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java
@@ -223,12 +223,12 @@ public final class Geo3DPoint extends Field {
 
   /** Encode single dimension */
   public static void encodeDimension(
-      double value, byte bytes[], int offset, PlanetModel planetModel) {
+      double value, byte[] bytes, int offset, PlanetModel planetModel) {
     NumericUtils.intToSortableBytes(planetModel.encodeValue(value), bytes, offset);
   }
 
   /** Decode single dimension */
-  public static double decodeDimension(byte value[], int offset, PlanetModel planetModel) {
+  public static double decodeDimension(byte[] value, int offset, PlanetModel planetModel) {
     return planetModel.decodeValue(NumericUtils.sortableBytesToInt(value, offset));
   }
 
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
index 10bb1e9d092..d8d75e26763 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
@@ -359,7 +359,7 @@ public class DirectSpellChecker {
 
     // create the suggestword response, sort it, and trim it to size.
-    SuggestWord suggestions[] = new SuggestWord[terms.size()];
+    SuggestWord[] suggestions = new SuggestWord[terms.size()];
     int index = suggestions.length - 1;
     for (ScoreTerm s : terms) {
       SuggestWord suggestion = new SuggestWord();
@@ -375,7 +375,7 @@ public class DirectSpellChecker {
     ArrayUtil.timSort(suggestions, Collections.reverseOrder(comparator));
 
     if (numSug < suggestions.length) {
-      SuggestWord trimmed[] = new SuggestWord[numSug];
+      SuggestWord[] trimmed = new SuggestWord[numSug];
       System.arraycopy(suggestions, 0, trimmed, 0, numSug);
       suggestions = trimmed;
     }
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java
index 94e63882acb..80b5897beed 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java
@@ -33,9 +33,9 @@ public final class LevenshteinDistance implements StringDistance {
   public float getDistance(String target, String other) {
     char[] sa;
     int n;
-    int p[]; // 'previous' cost array, horizontally
-    int d[]; // cost array, horizontally
-    int _d[]; // placeholder to assist in swapping p and d
+    int[] p; // 'previous' cost array, horizontally
+    int[] d; // cost array, horizontally
+    int[] _d; // placeholder to assist in swapping p and d
 
     /*
        The difference between this impl. and the previous is that, rather
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java
index bd77de71875..a3fa15e4913 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java
@@ -45,7 +45,7 @@ public final class LuceneLevenshteinDistance implements StringDistance {
     IntsRef targetPoints;
     IntsRef otherPoints;
     int n;
-    int d[][]; // cost array
+    int[][] d; // cost array
 
     // NOTE: if we cared, we could 3*m space instead of m*n space, similar to
     //       what LevenshteinDistance does, except cycling thru a ring of three
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java
index 98559c47984..01fa35b4e7b 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java
@@ -69,9 +69,9 @@ public class NGramDistance implements StringDistance {
     }
 
     char[] sa = new char[sl + n - 1];
-    float p[]; // 'previous' cost array, horizontally
-    float d[]; // cost array, horizontally
-    float _d[]; // placeholder to assist in swapping p and d
+    float[] p; // 'previous' cost array, horizontally
+    float[] d; // cost array, horizontally
+    float[] _d; // placeholder to assist in swapping p and d
 
     // construct sa with prefix
     for (int i = 0; i < sa.length; i++) {
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
index 011c48a8553..9139ec3d470 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
@@ -408,7 +408,7 @@ public class AnalyzingSuggester extends Lookup {
     String tempSortedFileName = null;
 
     count = 0;
-    byte buffer[] = new byte[8];
+    byte[] buffer = new byte[8];
     try {
       ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);
 
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java
index 92c6777a02d..2ffa1106502 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java
@@ -245,7 +245,7 @@ public final class FuzzySuggester extends AnalyzingSuggester {
       if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) {
         subs.add(Automata.makeString(string.ints, string.offset, string.length));
       } else {
-        int ints[] = new int[string.length - nonFuzzyPrefix];
+        int[] ints = new int[string.length - nonFuzzyPrefix];
         System.arraycopy(string.ints, string.offset + nonFuzzyPrefix, ints, 0, ints.length);
         // TODO: maybe add alphaMin to LevenshteinAutomata,
         // and pass 1 instead of 0? We probably don't want
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java
index a5cf50e14ae..91bbe98f885 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java
@@ -172,7 +172,7 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
       if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) {
         subs.add(Automata.makeString(string.ints, string.offset, string.length));
       } else {
-        int ints[] = new int[string.length - nonFuzzyPrefix];
+        int[] ints = new int[string.length - nonFuzzyPrefix];
         System.arraycopy(string.ints, string.offset + nonFuzzyPrefix, ints, 0, ints.length);
         // TODO: maybe add alphaMin to LevenshteinAutomata,
         // and pass 1 instead of 0? We probably don't want
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
index 1c9b45efa7a..c32f36d9b5f 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
@@ -53,7 +53,7 @@ import org.junit.Test;
 public class TestAnalyzingInfixSuggester extends LuceneTestCase {
 
   public void testBasic() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar")),
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
@@ -152,7 +152,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testAfterLoad() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar")),
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
@@ -202,7 +202,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
 
   @SuppressWarnings("unchecked")
   public void testHighlightAsObject() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
         };
@@ -290,7 +290,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testRandomMinPrefixLength() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar")),
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
@@ -367,7 +367,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testHighlight() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
         };
@@ -385,7 +385,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testHighlightCaseChange() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("a Penny saved is a penny earned", 10, new BytesRef("foobaz")),
         };
@@ -422,7 +422,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testDoubleClose() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
         };
@@ -458,7 +458,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
     AnalyzingInfixSuggester suggester =
         new AnalyzingInfixSuggester(newDirectory(), indexAnalyzer, queryAnalyzer, 3, false);
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("a bob for apples", 10, new BytesRef("foobaz")),
         };
@@ -861,7 +861,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
   }
 
   public void testBasicNRT() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar")),
         };
@@ -1025,7 +1025,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
 
   // LUCENE-5528 and LUCENE-6464
   public void testBasicContext() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")),
           new Input(
@@ -1320,7 +1320,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
     byte[] context3 = new byte[1];
     context3[0] = (byte) 0xff;
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar"), asSet(context1, context2)),
           new Input(
@@ -1376,7 +1376,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
 
   public void testContextNotAllTermsRequired() throws Exception {
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")),
           new Input(
@@ -1561,7 +1561,7 @@ public class TestAnalyzingInfixSuggester extends LuceneTestCase {
         suggester -> expectThrows(IllegalStateException.class, suggester::refresh));
   }
 
-  private Input sharedInputs[] =
+  private Input[] sharedInputs =
      new Input[] {
        new Input("lend me your ear", 8, new BytesRef("foobar")),
        new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")),
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
index c0c96c34ffe..bff93265de9 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
@@ -233,7 +233,7 @@ public class TestAnalyzingSuggester extends LuceneTestCase {
   public void testStandard() throws Exception {
     final String input =
         "the ghost of christmas past the"; // trailing stopword there just to perturb possible bugs
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input(input, 50),
         };
@@ -407,7 +407,7 @@ public class TestAnalyzingSuggester extends LuceneTestCase {
             new CannedTokenStream(
                 token("wifi", 1, 1), token("hotspot", 0, 2), token("network", 1, 1)));
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("wifi network is slow", 50), new Input("wi fi network is fast", 10),
         };
@@ -444,7 +444,7 @@ public class TestAnalyzingSuggester extends LuceneTestCase {
             new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)),
             new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1)));
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("ab xc", 50), new Input("ba xd", 50),
         };
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
index 95114ed346d..605766293e3 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
@@ -40,7 +40,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
   /** Test the weight transformation depending on the position of the matching term. */
   public void testBlendedSort() throws IOException {
     BytesRef payload = new BytesRef("star");
-    Input keys[] =
+    Input[] keys =
         new Input[] {new Input("star wars: episode v - the empire strikes back", 8, payload)};
 
     BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
@@ -54,7 +54,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
   public void testBlendedSort_fieldWeightUnitary_shouldRankSuggestionsByPositionMatch()
       throws IOException {
     BytesRef payload = new BytesRef("star");
-    Input keys[] =
+    Input[] keys =
         new Input[] {new Input("star wars: episode v - the empire strikes back", 1, payload)};
 
     BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
@@ -68,7 +68,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
   public void testBlendedSort_fieldWeightZero_shouldRankSuggestionsByPositionMatch()
       throws IOException {
     BytesRef payload = new BytesRef("star");
-    Input keys[] =
+    Input[] keys =
         new Input[] {new Input("star wars: episode v - the empire strikes back", 0, payload)};
 
     BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
@@ -83,7 +83,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
       testBlendedSort_fieldWeightLongMax_shouldRankSuggestionsByPositionMatchWithNoOverflow()
           throws IOException {
     BytesRef payload = new BytesRef("star");
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("star wars: episode v - the empire strikes back", Long.MAX_VALUE, payload)
         };
@@ -135,7 +135,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
     BytesRef pl = new BytesRef("lake");
     long w = 20;
 
-    Input keys[] = new Input[] {new Input("top of the lake", w, pl)};
+    Input[] keys = new Input[] {new Input("top of the lake", w, pl)};
 
     Path tempDir = createTempDir("BlendedInfixSuggesterTest");
     Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
@@ -201,7 +201,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
     BytesRef star = new BytesRef("star");
     BytesRef ret = new BytesRef("ret");
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("top of the lake", 18, lake),
           new Input("star wars: episode v - the empire strikes back", 12, star),
@@ -264,7 +264,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
 
     BytesRef payload = new BytesRef("lake");
 
-    Input keys[] = new Input[] {new Input("top of the lake", 8, payload)};
+    Input[] keys = new Input[] {new Input("top of the lake", 8, payload)};
 
     BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
 
@@ -339,7 +339,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
 
   public void testSuggesterCountForAllLookups() throws IOException {
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("lend me your ears", 1), new Input("as you sow so shall you reap", 1),
         };
@@ -398,7 +398,7 @@ public class TestBlendedInfixSuggester extends LuceneTestCase {
     BytesRef star = new BytesRef("star");
     BytesRef ret = new BytesRef("ret");
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("top of the lake", 15, lake),
           new Input("star wars: episode v - the empire strikes back", 12, star),
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
index 472a2879a17..a5603cc0375 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
@@ -128,7 +128,7 @@ public class TestFuzzySuggester extends LuceneTestCase {
 
   /** this is basically the WFST test ported to KeywordAnalyzer. so it acts the same */
   public void testKeyword() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("foo", 50),
           new Input("bar", 10),
@@ -207,7 +207,7 @@ public class TestFuzzySuggester extends LuceneTestCase {
 
   /** basic "standardanalyzer" test with stopword removal */
   public void testStandard() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("the ghost of christmas past", 50),
         };
@@ -301,7 +301,7 @@ public class TestFuzzySuggester extends LuceneTestCase {
             new CannedTokenStream(
                 token("wifi", 1, 1), token("hotspot", 0, 2), token("network", 1, 1)));
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("wifi network is slow", 50), new Input("wi fi network is fast", 10),
         };
@@ -348,7 +348,7 @@ public class TestFuzzySuggester extends LuceneTestCase {
             new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)),
             new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1)));
 
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("ab xc", 50), new Input("ba xd", 50),
         };
@@ -1207,7 +1207,7 @@ public class TestFuzzySuggester extends LuceneTestCase {
     IntsRef targetPoints;
     IntsRef otherPoints;
     int n;
-    int d[][]; // cost array
+    int[][] d; // cost array
 
     // NOTE: if we cared, we could 3*m space instead of m*n space, similar to
     //       what LevenshteinDistance does, except cycling thru a ring of three
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java
index 36d16c3d255..9000e019337 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java
@@ -861,7 +861,7 @@ public class TestSuggestField extends LuceneTestCase {
     DirectoryReader reader = iw.getReader();
 
     int numThreads = TestUtil.nextInt(random(), 2, 7);
-    Thread threads[] = new Thread[numThreads];
+    Thread[] threads = new Thread[numThreads];
     final CyclicBarrier startingGun = new CyclicBarrier(numThreads + 1);
     final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>();
     final SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
index 2ed0f80fc1e..c2f4fbe3598 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
@@ -28,7 +28,7 @@ import org.apache.lucene.util.TestUtil;
 public class TestWFSTCompletion extends LuceneTestCase {
 
   public void testBasic() throws Exception {
-    Input keys[] =
+    Input[] keys =
         new Input[] {
           new Input("foo", 50),
           new Input("bar", 10),
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
index 188ffc0c37e..71cf29f734f 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
@@ -123,11 +123,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       Integer finalOffset,
       Integer finalPosInc,
       boolean[] keywordAtts,
@@ -408,11 +408,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       Integer finalOffset,
       boolean[] keywordAtts,
       boolean graphOffsetsAreCorrect)
@@ -436,11 +436,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       Integer finalOffset,
       Integer finalPosInc,
       boolean[] keywordAtts,
@@ -466,11 +466,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       Integer finalOffset,
       boolean graphOffsetsAreCorrect)
       throws IOException {
@@ -490,11 +490,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       Integer finalOffset)
       throws IOException {
     assertTokenStreamContents(
@@ -504,10 +504,10 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
       Integer finalOffset)
       throws IOException {
     assertTokenStreamContents(
@@ -517,10 +517,10 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[])
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements)
       throws IOException {
     assertTokenStreamContents(
         ts, output, startOffsets, endOffsets, types, posIncrements, null, null);
@@ -529,10 +529,10 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
       int[] posLengths)
       throws IOException {
     assertTokenStreamContents(
@@ -554,18 +554,18 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   }
 
   public static void assertTokenStreamContents(
-      TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
+      TokenStream ts, String[] output, int[] startOffsets, int[] endOffsets) throws IOException {
     assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, null);
   }
 
   public static void assertTokenStreamContents(
-      TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset)
+      TokenStream ts, String[] output, int[] startOffsets, int[] endOffsets, Integer finalOffset)
       throws IOException {
     assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, finalOffset);
   }
 
   public static void assertTokenStreamContents(
-      TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements)
+      TokenStream ts, String[] output, int[] startOffsets, int[] endOffsets, int[] posIncrements)
       throws IOException {
     assertTokenStreamContents(
         ts, output, startOffsets, endOffsets, null, posIncrements, null, null);
@@ -574,8 +574,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
+      int[] startOffsets,
+      int[] endOffsets,
       int[] posIncrements,
       Integer finalOffset)
       throws IOException {
@@ -586,8 +586,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static void assertTokenStreamContents(
       TokenStream ts,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
+      int[] startOffsets,
+      int[] endOffsets,
       int[] posIncrements,
       int[] posLengths,
       Integer finalOffset)
@@ -600,10 +600,10 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       Analyzer a,
       String input,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[])
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements)
       throws IOException {
     assertTokenStreamContents(
         a.tokenStream("dummy", input),
@@ -622,11 +622,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       Analyzer a,
       String input,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[])
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths)
       throws IOException {
     assertTokenStreamContents(
         a.tokenStream("dummy", input),
@@ -645,11 +645,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       Analyzer a,
       String input,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       boolean graphOffsetsAreCorrect)
       throws IOException {
     assertTokenStreamContents(
@@ -670,11 +670,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       Analyzer a,
       String input,
      String[] output,
-      int startOffsets[],
-      int endOffsets[],
-      String types[],
-      int posIncrements[],
-      int posLengths[],
+      int[] startOffsets,
+      int[] endOffsets,
+      String[] types,
+      int[] posIncrements,
+      int[] posLengths,
       boolean graphOffsetsAreCorrect,
       byte[][] payloads)
       throws IOException {
@@ -728,7 +728,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   }
 
   public static void assertAnalyzesTo(
-      Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[])
+      Analyzer a, String input, String[] output, int[] startOffsets, int[] endOffsets)
       throws IOException {
     assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, null, null);
   }
@@ -737,8 +737,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       Analyzer a,
       String input,
       String[] output,
-      int startOffsets[],
-      int endOffsets[],
+      int[] startOffsets,
+      int[] endOffsets,
       int[] posIncrements)
       throws IOException {
     assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, posIncrements, null);
@@ -930,7 +930,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
     // so this should only really fail from another thread if it's an actual thread problem
     int numThreads = TestUtil.nextInt(random, 2, 4);
     final CountDownLatch startingGun = new CountDownLatch(1);
-    AnalysisThread threads[] = new AnalysisThread[numThreads];
+    AnalysisThread[] threads = new AnalysisThread[numThreads];
     for (int i = 0; i < threads.length; i++) {
       threads[i] =
           new AnalysisThread(
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
index 92beb054ab9..80c2b0f5398 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
@@ -178,7 +178,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
       }
     }
 
-    Thread threads[] = new Thread[numThreads];
+    Thread[] threads = new Thread[numThreads];
     for (int i = 0; i < numThreads; i++) {
       threads[i] =
           new Thread() {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java
index 02d189cd0d0..ed04a533673 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java
@@ -161,7 +161,7 @@ public class MockTokenizer extends Tokenizer {
       if (cp < 0) {
         break;
       } else if (isTokenChar(cp)) {
-        char chars[] = new char[2];
+        char[] chars = new char[2];
         int endOffset;
         do {
           int len = Character.toChars(normalize(cp), chars, 0);
@@ -259,21 +259,21 @@ public class MockTokenizer extends Tokenizer {
       case 0:
         {
           // read(char[])
-          char c[] = new char[1];
+          char[] c = new char[1];
           int ret = input.read(c);
           return ret < 0 ? ret : c[0];
         }
       case 1:
         {
           // read(char[], int, int)
-          char c[] = new char[2];
+          char[] c = new char[2];
           int ret = input.read(c, 1, 1);
           return ret < 0 ? ret : c[1];
         }
       case 2:
         {
           // read(CharBuffer)
-          char c[] = new char[1];
+          char[] c = new char[1];
           CharBuffer cb = CharBuffer.wrap(c);
           int ret = input.read(cb);
           return ret < 0 ? ret : c[0];
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java
index 70087b232a5..5e2bbe1075a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java
@@ -51,7 +51,7 @@ public class VocabularyAssert {
     String inputLine = null;
     while ((inputLine = vocReader.readLine()) != null) {
       if (inputLine.startsWith("#") || inputLine.trim().length() == 0) continue; /* comment */
-      String words[] = inputLine.split("\t");
+      String[] words = inputLine.split("\t");
       BaseTokenStreamTestCase.checkOneTerm(a, words[0], words[1]);
     }
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java
index 5b18e0c74d9..46da94fba80 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java
@@ -1612,7 +1612,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
     iwc.setMergeScheduler(new SerialMergeScheduler());
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc);
 
-    for (double p[] : pts) {
+    for (double[] p : pts) {
       Document doc = new Document();
       addPointToDoc("point", doc, p[0], p[1]);
       writer.addDocument(doc);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseXYPointTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseXYPointTestCase.java
index bcdecf794f1..b6cc893d5d1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseXYPointTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseXYPointTestCase.java
@@ -1448,7 +1448,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase {
     iwc.setMergeScheduler(new SerialMergeScheduler());
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc);
 
-    for (double p[] : pts) {
+    for (double[] p : pts) {
       Document doc = new Document();
       addPointToDoc("point", doc, (float) p[0], (float) p[1]);
       writer.addDocument(doc);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java
index 4a6294a6b21..7a3f8abadc4 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java
@@ -238,9 +238,9 @@ public class GeoTestUtil {
   // see http://www-ma2.upc.es/geoc/Schirra-pointPolygon.pdf for more info on some of these
   // strategies
   public static double[] nextPointNear(Polygon polygon) {
-    double polyLats[] = polygon.getPolyLats();
-    double polyLons[] = polygon.getPolyLons();
-    Polygon holes[] = polygon.getHoles();
+    double[] polyLats = polygon.getPolyLats();
+    double[] polyLons = polygon.getPolyLons();
+    Polygon[] holes = polygon.getHoles();
 
     // if there are any holes, target them aggressively
     if (holes.length > 0 && random().nextInt(3) == 0) {
@@ -268,8 +268,8 @@ public class GeoTestUtil {
       // target points around the bounding box edges
       Polygon container =
          boxPolygon(new Rectangle(polygon.minLat, polygon.maxLat, polygon.minLon, polygon.maxLon));
-      double containerLats[] = container.getPolyLats();
-      double containerLons[] = container.getPolyLons();
+      double[] containerLats = container.getPolyLats();
+      double[] containerLons = container.getPolyLons();
       int startVertex = random().nextInt(containerLats.length - 1);
       return nextPointAroundLine(
           containerLats[startVertex], containerLons[startVertex],
@@ -288,11 +288,11 @@ public class GeoTestUtil {
 
   /** Returns next box for testing near a Polygon */
   public static Rectangle nextBoxNear(Polygon polygon) {
-    final double point1[];
-    final double point2[];
+    final double[] point1;
+    final double[] point2;
 
     // if there are any holes, target them aggressively
-    Polygon holes[] = polygon.getHoles();
+    Polygon[] holes = polygon.getHoles();
     if (holes.length > 0 && random().nextInt(3) == 0) {
       return nextBoxNear(holes[random().nextInt(holes.length)]);
     }
@@ -307,8 +307,8 @@ public class GeoTestUtil {
     point1 = nextPointNear(polygon);
     point2 = new double[2];
     // now figure out a good delta: we use a rough heuristic, up to the length of an edge
-    double polyLats[] = polygon.getPolyLats();
-    double polyLons[] = polygon.getPolyLons();
+    double[] polyLats = polygon.getPolyLats();
+    double[] polyLons = polygon.getPolyLons();
     int vertex = random().nextInt(polyLats.length - 1);
     double deltaX = polyLons[vertex + 1] - polyLons[vertex];
     double deltaY = polyLats[vertex + 1] - polyLats[vertex];
@@ -644,7 +644,7 @@ public class GeoTestUtil {
     for (Object o : flattened) {
       // tostring
       if (o instanceof double[]) {
-        double point[] = (double[]) o;
+        double[] point = (double[]) o;
         sb.append("\n");
@@ -661,7 +661,7 @@ public class GeoTestUtil {
         style = "fill:lightskyblue;stroke:black;stroke-width:0.2%;stroke-dasharray:0.5%,1%;";
         opacity = "0.3";
       } else if (o instanceof double[]) {
-        double point[] = (double[]) o;
+        double[] point = (double[]) o;
         gon =
             boxPolygon(
                 new Rectangle(
@@ -677,8 +677,8 @@ public class GeoTestUtil {
         opacity = "0.5";
       }
       // polygon
-      double polyLats[] = gon.getPolyLats();
-      double polyLons[] = gon.getPolyLons();
+      double[] polyLats = gon.getPolyLats();
+      double[] polyLons = gon.getPolyLons();
       sb.append(" 0) {
@@ -688,8 +688,8 @@ public class GeoTestUtil {
       }
       sb.append("\" style=\"").append(style).append("\"/>\n");
       for (Polygon hole : gon.getHoles()) {
-        double holeLats[] = hole.getPolyLats();
-        double holeLons[] = hole.getPolyLons();
+        double[] holeLats = hole.getPolyLats();
+        double[] holeLons = hole.getPolyLons();
         sb.append(" 0) {
@@ -743,8 +743,8 @@ public class GeoTestUtil {
     if (polygon.getHoles().length > 0) {
       throw new UnsupportedOperationException("this testing method does not support holes");
     }
-    double polyLats[] = polygon.getPolyLats();
-    double polyLons[] = polygon.getPolyLons();
+    double[] polyLats = polygon.getPolyLats();
+    double[] polyLons = polygon.getPolyLons();
     // bounding box check required due to rounding errors (we don't solve that problem)
     if (latitude < polygon.minLat
         || latitude > polygon.maxLat
@@ -756,8 +756,8 @@ public class GeoTestUtil {
     boolean c = false;
     int i, j;
     int nvert = polyLats.length;
-    double verty[] = polyLats;
-    double vertx[] = polyLons;
+    double[] verty = polyLats;
+    double[] vertx = polyLons;
     double testy = latitude;
     double testx = longitude;
     for (i = 0, j = 1; j < nvert; ++i, ++j) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java
index bc7ec58c149..1cd77ccd31b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java
@@ -280,8 +280,8 @@ public class ShapeTestUtil {
     if (polygon.getHoles().length > 0) {
throw new UnsupportedOperationException("this testing method does not support holes"); } - double polyXs[] = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyX()); - double polyYs[] = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyY()); + double[] polyXs = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyX()); + double[] polyYs = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyY()); // bounding box check required due to rounding errors (we don't solve that problem) if (x < polygon.minX || x > polygon.maxX || y < polygon.minY || y > polygon.maxY) { return false; @@ -290,8 +290,8 @@ public class ShapeTestUtil { boolean c = false; int i, j; int nvert = polyYs.length; - double verty[] = polyYs; - double vertx[] = polyXs; + double[] verty = polyYs; + double[] vertx = polyXs; double testy = y; double testx = x; for (i = 0, j = 1; j < nvert; ++i, ++j) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java index 5545b6e9e7b..cb207554b43 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java @@ -71,7 +71,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest * 1, 10, 100 bytes. */ public void testSingleFile() throws IOException { - int data[] = new int[] {0, 1, 10, 100}; + int[] data = new int[] {0, 1, 10, 100}; for (int i = 0; i < data.length; i++) { String testfile = "_" + i + ".test"; Directory dir = newDirectory(); @@ -95,7 +95,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest /** This test creates compound file based on two files. 
*/ public void testTwoFiles() throws IOException { - String files[] = {"_123.d1", "_123.d2"}; + String[] files = {"_123.d1", "_123.d2"}; Directory dir = newDirectory(); SegmentInfo si = newSegmentInfo(dir, "_123"); createSequenceFile(dir, files[0], (byte) 0, 15, si.getId(), "suffix"); @@ -634,7 +634,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest Directory cr = createLargeCFS(dir); IndexInput is = cr.openInput("_123.f2", newIOContext(random())); is.seek(is.length() - 10); - byte b[] = new byte[100]; + byte[] b = new byte[100]; is.readBytes(b, 0, 10); // Single byte read past end of file @@ -714,8 +714,8 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest assertEquals(msg + " length", expected.length(), test.length()); assertEquals(msg + " position", expected.getFilePointer(), test.getFilePointer()); - byte expectedBuffer[] = new byte[512]; - byte testBuffer[] = new byte[expectedBuffer.length]; + byte[] expectedBuffer = new byte[512]; + byte[] testBuffer = new byte[expectedBuffer.length]; long remainder = expected.length() - expected.getFilePointer(); while (remainder > 0) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java index 582b60f5ff5..32ba8de6fe8 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java @@ -1058,7 +1058,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes conf.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf); Document doc = new Document(); - byte bytes[] = new byte[32766]; + byte[] bytes = new byte[32766]; random().nextBytes(bytes); BytesRef b = newBytesRef(bytes); doc.add(new BinaryDocValuesField("dv", b)); @@ -1084,7 +1084,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes conf.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf); Document doc = new Document(); - byte bytes[] = new byte[32766]; + byte[] bytes = new byte[32766]; random().nextBytes(bytes); BytesRef b = newBytesRef(bytes); doc.add(new SortedDocValuesField("dv", b)); @@ -1140,7 +1140,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes IndexReader ireader = DirectoryReader.open(directory); // read-only=true assert ireader.leaves().size() == 1; SortedDocValues dv = DocValues.getSorted(ireader.leaves().get(0).reader(), "dv"); - byte mybytes[] = new byte[20]; + byte[] mybytes = new byte[20]; assertEquals(0, dv.nextDoc()); assertEquals("boo!", dv.lookupOrd(dv.ordValue()).utf8ToString()); assertFalse(dv.lookupOrd(dv.ordValue()).bytes == mybytes); @@ -1360,7 +1360,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doc.add(new StringField("id", Integer.toString(i), Field.Store.NO)); int valueCount = (int) counts.getAsLong(); - long valueArray[] = new long[valueCount]; + long[] valueArray = new long[valueCount]; for (int j = 0; j < valueCount; j++) { long value = values.getAsLong(); valueArray[j] = value; @@ -1399,11 +1399,11 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes if (i > docValues.docID()) { docValues.nextDoc(); } - String expected[] = 
r.document(i).getValues("stored"); + String[] expected = r.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expected.length); } else { - String actual[] = new String[docValues.docValueCount()]; + String[] actual = new String[docValues.docValueCount()]; for (int j = 0; j < actual.length; j++) { actual[j] = Long.toString(docValues.nextValue()); } @@ -1587,7 +1587,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doTestBinaryVsStoredFields( density, () -> { - byte buffer[] = new byte[fixedLength]; + byte[] buffer = new byte[fixedLength]; random().nextBytes(buffer); return buffer; }); @@ -1609,7 +1609,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes density, () -> { final int length = random().nextInt(10); - byte buffer[] = new byte[length]; + byte[] buffer = new byte[length]; random().nextBytes(buffer); return buffer; }); @@ -2248,7 +2248,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes LeafReader r = context.reader(); SortedSetDocValues docValues = r.getSortedSetDocValues("dv"); for (int i = 0; i < r.maxDoc(); i++) { - String stringValues[] = r.document(i).getValues("stored"); + String[] stringValues = r.document(i).getValues("stored"); if (docValues != null) { if (docValues.docID() < i) { docValues.nextDoc(); @@ -2283,7 +2283,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes LeafReader r = context.reader(); SortedSetDocValues docValues = r.getSortedSetDocValues("dv"); for (int i = 0; i < r.maxDoc(); i++) { - String stringValues[] = r.document(i).getValues("stored"); + String[] stringValues = r.document(i).getValues("stored"); if (docValues.docID() < i) { docValues.nextDoc(); } @@ -2643,7 +2643,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes for (int i = 0; i < numDocs; i++) { idField.setStringValue(Integer.toString(i)); int length = TestUtil.nextInt(random(), 0, 8); - byte buffer[] = new byte[length]; + byte[] buffer = new byte[length]; random().nextBytes(buffer); storedBinField.setBytesValue(buffer); dvBinField.setBytesValue(buffer); @@ -2668,7 +2668,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes // compare final DirectoryReader ir = DirectoryReader.open(dir); int numThreads = TestUtil.nextInt(random(), 2, 7); - Thread threads[] = new Thread[numThreads]; + Thread[] threads = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); for (int i = 0; i < threads.length; i++) { @@ -2730,7 +2730,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes for (int i = 0; i < numDocs; i++) { idField.setStringValue(Integer.toString(i)); int length = TestUtil.nextInt(random(), 0, 8); - byte buffer[] = new byte[length]; + byte[] buffer = new byte[length]; random().nextBytes(buffer); storedBinField.setBytesValue(buffer); dvBinField.setBytesValue(buffer); @@ -2784,7 +2784,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes // compare final DirectoryReader ir = DirectoryReader.open(dir); int numThreads = TestUtil.nextInt(random(), 2, 7); - Thread threads[] = new Thread[numThreads]; + Thread[] threads = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); for (int i = 0; i < threads.length; i++) { @@ -2823,7 +2823,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes } } - String values[] = 
r.document(j).getValues("storedSortedSet"); + String[] values = r.document(j).getValues("storedSortedSet"); if (values.length > 0) { assertNotNull(sortedSet); assertEquals(j, sortedSet.nextDoc()); @@ -2836,7 +2836,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd()); } - String numValues[] = r.document(j).getValues("storedSortedNumeric"); + String[] numValues = r.document(j).getValues("storedSortedNumeric"); if (numValues.length > 0) { assertNotNull(sortedNumeric); assertEquals(j, sortedNumeric.nextDoc()); @@ -2908,7 +2908,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes for (int i = 0; i < 10; i++) { final DirectoryReader r = DirectoryReader.open(dir); final CountDownLatch startingGun = new CountDownLatch(1); - Thread threads[] = new Thread[TestUtil.nextInt(random(), 4, 10)]; + Thread[] threads = new Thread[TestUtil.nextInt(random(), 4, 10)]; for (int tid = 0; tid < threads.length; tid++) { threads[tid] = new Thread() { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java index defd35e1a08..c47aa001b71 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java @@ -502,7 +502,7 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas } } } - long norms[] = new long[numDocsWithField]; + long[] norms = new long[numDocsWithField]; for (int i = 0; i < numDocsWithField; i++) { norms[i] = longs.getAsLong(); } @@ -583,10 +583,10 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas } static class CannedNormSimilarity extends Similarity { - final long norms[]; + final long[] norms; int index = 0; - CannedNormSimilarity(long norms[]) { + CannedNormSimilarity(long[] norms) { this.norms = norms; } @@ -707,7 +707,7 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas } } - long norms[] = new long[numDocsWithField]; + long[] norms = new long[numDocsWithField]; for (int i = 0; i < numDocsWithField; i++) { norms[i] = random().nextLong(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java index 412567ac5b4..069d0418f13 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java @@ -55,7 +55,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT public void testFiles() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -80,7 +80,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT public void testAddsSelfToFiles() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -120,7 +120,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT public void testDiagnostics() throws 
Exception { Directory dir = newDirectory(); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); Map diagnostics = new HashMap<>(); diagnostics.put("key1", "value1"); diagnostics.put("key2", "value2"); @@ -156,7 +156,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT public void testAttributes() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); Map attributes = new HashMap<>(); attributes.put("key1", "value1"); attributes.put("key2", "value2"); @@ -192,7 +192,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT public void testUniqueID() throws Exception { Codec codec = getCodec(); Directory dir = newDirectory(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -219,7 +219,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT for (Version v : getVersions()) { for (Version minV : new Version[] {v, null}) { Directory dir = newDirectory(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -356,7 +356,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT Directory dir = newDirectory(); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -396,7 +396,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -441,7 +441,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -486,7 +486,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -532,7 +532,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); + byte[] id = StringHelper.randomId(); SegmentInfo info = new SegmentInfo( dir, @@ -593,7 +593,7 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT diagnostics.put( TestUtil.randomUnicodeString(random()), TestUtil.randomUnicodeString(random())); } - byte id[] = new byte[StringHelper.ID_LENGTH]; + byte[] id = new byte[StringHelper.ID_LENGTH]; random().nextBytes(id); Map attributes = new HashMap<>(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java index a6af7a29421..cc57c5612ac 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java +++ 
b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java @@ -799,7 +799,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat /** mix up field numbers, merge, and check that data is correct */ public void testMismatchedFields() throws Exception { - Directory dirs[] = new Directory[10]; + Directory[] dirs = new Directory[10]; for (int i = 0; i < dirs.length; i++) { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java index 96dc555a6d2..13ba68d3146 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java @@ -344,7 +344,7 @@ public class CheckHits { if (!deep) return; - Explanation detail[] = expl.getDetails(); + Explanation[] detail = expl.getDetails(); // TODO: can we improve this entire method? it's really geared to work only with TF/IDF if (expl.getDescription().endsWith("computed from:")) { return; // something more complicated. diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java index 48ed7b261ae..2eb4f33645b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java @@ -288,7 +288,7 @@ public class QueryUtils { final int skip_op = 0; final int next_op = 1; - final int orders[][] = { + final int[][] orders = { {next_op}, {skip_op}, {skip_op, next_op}, @@ -299,17 +299,17 @@ public class QueryUtils { }; for (int k = 0; k < orders.length; k++) { - final int order[] = orders[k]; + final int[] order = orders[k]; // System.out.print("Order:");for (int i = 0; i < order.length; i++) // System.out.print(order[i]==skip_op ? 
" skip()":" next()"); // System.out.println(); - final int opidx[] = {0}; - final int lastDoc[] = {-1}; + final int[] opidx = {0}; + final int[] lastDoc = {-1}; // FUTURE: ensure scorer.doc()==-1 final float maxDiff = 1e-5f; - final LeafReader lastReader[] = {null}; + final LeafReader[] lastReader = {null}; s.search( q, @@ -503,8 +503,8 @@ public class QueryUtils { public static void checkFirstSkipTo(final Query q, final IndexSearcher s) throws IOException { // System.out.println("checkFirstSkipTo: "+q); final float maxDiff = 1e-3f; - final int lastDoc[] = {-1}; - final LeafReader lastReader[] = {null}; + final int[] lastDoc = {-1}; + final LeafReader[] lastReader = {null}; final List context = s.getTopReaderContext().leaves(); Query rewritten = s.rewrite(q); s.search( diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java index d6738f631ac..ddc6084f2c9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java @@ -75,7 +75,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { private void runCopyFrom(Directory source, Directory dest) throws IOException { IndexOutput output = source.createOutput("foobar", newIOContext(random())); - byte bytes[] = RandomBytes.randomBytesOfLength(random(), 20000); + byte[] bytes = RandomBytes.randomBytesOfLength(random(), 20000); output.writeBytes(bytes, bytes.length); output.close(); @@ -83,7 +83,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { assertTrue(slowFileExists(dest, "foobaz")); IndexInput input = dest.openInput("foobaz", newIOContext(random())); - byte bytes2[] = new byte[bytes.length]; + byte[] bytes2 = new byte[bytes.length]; input.readBytes(bytes2, 0, bytes2.length); input.close(); @@ -94,7 +94,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (Directory dir = getDirectory(createTempDir("testRename"))) { IndexOutput output = dir.createOutput("foobar", newIOContext(random())); int numBytes = random().nextInt(20000); - byte bytes[] = new byte[numBytes]; + byte[] bytes = new byte[numBytes]; random().nextBytes(bytes); output.writeBytes(bytes, bytes.length); output.close(); @@ -102,7 +102,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { dir.rename("foobar", "foobaz"); IndexInput input = dir.openInput("foobaz", newIOContext(random())); - byte bytes2[] = new byte[numBytes]; + byte[] bytes2 = new byte[numBytes]; input.readBytes(bytes2, 0, bytes2.length); assertEquals(input.length(), numBytes); input.close(); @@ -491,7 +491,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { public void testChecksum() throws Exception { CRC32 expected = new CRC32(); int numBytes = random().nextInt(20000); - byte bytes[] = new byte[numBytes]; + byte[] bytes = new byte[numBytes]; random().nextBytes(bytes); expected.update(bytes); @@ -784,7 +784,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { public void testCopyBytesWithThreads() throws Exception { try (Directory d = getDirectory(createTempDir("testCopyBytesWithThreads"))) { int headerLen = 100; - byte data[] = RandomBytes.randomBytesOfLengthBetween(random(), headerLen + 1, 10000); + byte[] data = RandomBytes.randomBytesOfLengthBetween(random(), headerLen + 1, 10000); IndexOutput output = d.createOutput("data", IOContext.DEFAULT); 
output.writeBytes(data, 0, data.length); @@ -799,7 +799,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { // now make N copies of the remaining bytes int threads = 10; CyclicBarrier start = new CyclicBarrier(threads); - Thread copies[] = + Thread[] copies = IntStream.range(0, threads) .mapToObj( (i) -> { @@ -878,7 +878,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (Directory dir = getDirectory(createTempDir("testLongs"))) { IndexOutput output = dir.createOutput("longs", newIOContext(random())); int num = TestUtil.nextInt(random(), 50, 3000); - long longs[] = new long[num]; + long[] longs = new long[num]; for (int i = 0; i < longs.length; i++) { longs[i] = TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE); output.writeLong(longs[i]); @@ -905,7 +905,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { for (int i = 0; i < 7; i++) { String name = "longs-" + i; IndexOutput o = dir.createOutput(name, newIOContext(random())); - byte junk[] = new byte[i]; + byte[] junk = new byte[i]; random().nextBytes(junk); o.writeBytes(junk, junk.length); input.seek(0); @@ -927,7 +927,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (Directory dir = getDirectory(createTempDir("testInts"))) { IndexOutput output = dir.createOutput("ints", newIOContext(random())); int num = TestUtil.nextInt(random(), 50, 3000); - int ints[] = new int[num]; + int[] ints = new int[num]; for (int i = 0; i < ints.length; i++) { ints[i] = random().nextInt(); output.writeInt(ints[i]); @@ -954,7 +954,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { for (int i = 0; i < 7; i++) { String name = "ints-" + i; IndexOutput o = dir.createOutput(name, newIOContext(random())); - byte junk[] = new byte[i]; + byte[] junk = new byte[i]; random().nextBytes(junk); o.writeBytes(junk, junk.length); input.seek(0); @@ -975,7 +975,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (Directory dir = getDirectory(createTempDir("testShorts"))) { IndexOutput output = dir.createOutput("shorts", newIOContext(random())); int num = TestUtil.nextInt(random(), 50, 3000); - short shorts[] = new short[num]; + short[] shorts = new short[num]; for (int i = 0; i < shorts.length; i++) { shorts[i] = (short) random().nextInt(); output.writeShort(shorts[i]); @@ -1002,7 +1002,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { for (int i = 0; i < 7; i++) { String name = "shorts-" + i; IndexOutput o = dir.createOutput(name, newIOContext(random())); - byte junk[] = new byte[i]; + byte[] junk = new byte[i]; random().nextBytes(junk); o.writeBytes(junk, junk.length); input.seek(0); @@ -1023,7 +1023,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (Directory dir = getDirectory(createTempDir("testBytes"))) { IndexOutput output = dir.createOutput("bytes", newIOContext(random())); int num = TestUtil.nextInt(random(), 50, 3000); - byte bytes[] = new byte[num]; + byte[] bytes = new byte[num]; random().nextBytes(bytes); for (int i = 0; i < bytes.length; i++) { output.writeByte(bytes[i]); @@ -1050,7 +1050,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { for (int i = 0; i < 7; i++) { String name = "bytes-" + i; IndexOutput o = dir.createOutput(name, newIOContext(random())); - byte junk[] = new byte[i]; + byte[] junk = new byte[i]; random().nextBytes(junk); o.writeBytes(junk, junk.length); input.seek(0); @@ -1077,7 +1077,7 @@ public abstract class 
BaseDirectoryTestCase extends LuceneTestCase { } else { num = TestUtil.nextInt(random(), 50, 250); } - byte bytes[] = new byte[num]; + byte[] bytes = new byte[num]; random().nextBytes(bytes); for (int i = 0; i < bytes.length; i++) { output.writeByte(bytes[i]); @@ -1098,7 +1098,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { IndexInput slice2 = slice1.slice("slice2", j, num - i - j); assertEquals(0, slice2.getFilePointer()); assertEquals(num - i - j, slice2.length()); - byte data[] = new byte[num]; + byte[] data = new byte[num]; System.arraycopy(bytes, 0, data, 0, i + j); if (random().nextBoolean()) { // read the bytes for this slice-of-slice diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index a402319acdc..975a0ea911a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -1313,8 +1313,8 @@ public abstract class LuceneTestCase extends Assert { } if (VERBOSE && didChange) { String current = c.toString(); - String previousLines[] = previous.split("\n"); - String currentLines[] = current.split("\n"); + String[] previousLines = previous.split("\n"); + String[] currentLines = current.split("\n"); StringBuilder diff = new StringBuilder(); // this should always be the case, diff each line @@ -1652,7 +1652,7 @@ public abstract class LuceneTestCase extends Assert { * @see LUCENE-4020 */ public static TimeZone randomTimeZone(Random random) { - String tzIds[] = TimeZone.getAvailableIDs(); + String[] tzIds = TimeZone.getAvailableIDs(); return TimeZone.getTimeZone(tzIds[random.nextInt(tzIds.length)]); } @@ -2385,7 +2385,7 @@ public abstract class LuceneTestCase extends Assert { } } else if (code == 2) { // term, but ensure a non-zero offset - byte newbytes[] = new byte[term.length + 5]; + byte[] newbytes = new byte[term.length + 5]; System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length); tests.add(new BytesRef(newbytes, 5, term.length)); } else if (code == 3) { diff --git a/lucene/test-framework/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingStoredFieldsFormat.java b/lucene/test-framework/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingStoredFieldsFormat.java index 71acf4f89e1..ad1f7866539 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingStoredFieldsFormat.java +++ b/lucene/test-framework/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingStoredFieldsFormat.java @@ -99,7 +99,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes } public void testZFloat() throws Exception { - byte buffer[] = new byte[5]; // we never need more than 5 bytes + byte[] buffer = new byte[5]; // we never need more than 5 bytes ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); ByteArrayDataInput in = new ByteArrayDataInput(buffer); @@ -120,7 +120,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes } // round-trip special values - float special[] = { + float[] special = { -0.0f, +0.0f, Float.NEGATIVE_INFINITY, @@ -156,7 +156,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes } public void testZDouble() throws Exception { - byte buffer[] = new byte[9]; // we never need more than 9 bytes + byte[] buffer = new byte[9]; // we never need more 
than 9 bytes ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); ByteArrayDataInput in = new ByteArrayDataInput(buffer); @@ -177,7 +177,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes } // round-trip special values - double special[] = { + double[] special = { -0.0d, +0.0d, Double.NEGATIVE_INFINITY, @@ -223,7 +223,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes } public void testTLong() throws Exception { - byte buffer[] = new byte[10]; // we never need more than 10 bytes + byte[] buffer = new byte[10]; // we never need more than 10 bytes ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); ByteArrayDataInput in = new ByteArrayDataInput(buffer); diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestDisableFsyncFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestDisableFsyncFS.java index 13ec353d3b4..49c6ebe4744 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestDisableFsyncFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestDisableFsyncFS.java @@ -42,7 +42,7 @@ public class TestDisableFsyncFS extends MockFileSystemTestCase { StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE); - byte bytes[] = new byte[128]; + byte[] bytes = new byte[128]; random().nextBytes(bytes); file.write(ByteBuffer.wrap(bytes)); file.force(true);
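
Every hunk above applies the same mechanical rewrite: the array brackets move from the variable name to the type. As a minimal standalone sketch (the class name below is hypothetical and not part of this patch), both declaration forms are accepted by javac and produce the same type, which is why the change is behavior-neutral; the bracket-on-type form is the idiomatic Java style the patch standardizes on:

// ArrayDeclStyles.java - illustrative example only, not a file in this patch
public class ArrayDeclStyles {
  public static void main(String[] args) {
    int legacy[] = new int[] {1, 2, 3}; // C-style: brackets on the variable
    int[] idiomatic = new int[] {1, 2, 3}; // Java-style: brackets on the type
    // Both variables have the identical type int[]; only the syntax differs.
    System.out.println(legacy.length == idiomatic.length); // prints: true
  }
}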