LUCENE-1257: More Java5 updates

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@829013 13f79535-47bb-0310-9956-ffa450edef68
commit a5df94c249
parent 04d00642e4
Author: Uwe Schindler
Date:   2009-10-23 11:54:03 +00:00
52 changed files with 226 additions and 260 deletions
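
Every hunk below follows the same Java 5 migration recipe: raw collection types gain type parameters, casts at the use sites disappear, and hand-rolled Iterator loops become enhanced for loops. A minimal, self-contained sketch of the before/after shape (class and variable names here are illustrative, not taken from the patch):

import java.util.ArrayList;
import java.util.List;

public class RawToGenericsSketch {
  public static void main(String[] args) {
    // Before (pre-Java-5): raw type, cast on every retrieval.
    List raw = new ArrayList();
    raw.add("stat");
    String s1 = (String) raw.get(0);

    // After (Java 5): the element type is declared once and checked by javac.
    List<String> typed = new ArrayList<String>();
    typed.add("stat");
    String s2 = typed.get(0); // no cast

    System.out.println(s1 + " " + s2);
  }
}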

View File

@@ -34,7 +34,7 @@ public class Points {
// stat points ordered by their start time.
// for now we collect points as TaskStats objects.
// later might optimize to collect only native data.
private ArrayList points = new ArrayList();
private ArrayList<TaskStats> points = new ArrayList<TaskStats>();
private int nextTaskRunNum = 0;
@@ -50,7 +50,7 @@ public class Points {
* the actual task stats are returned, so caller should not modify this task stats.
* @return current {@link TaskStats}.
*/
public List taskStats () {
public List<TaskStats> taskStats () {
return points;
}

View File

@@ -46,7 +46,7 @@ public class CommitIndexTask extends PerfTask {
if (iw != null) {
if (commitUserData == null) iw.commit();
else {
Map map = new HashMap();
Map<String,String> map = new HashMap<String,String>();
map.put(OpenReaderTask.USER_DATA, commitUserData);
iw.commit(map);
}

View File

@@ -43,7 +43,7 @@ public class FlushReaderTask extends PerfTask {
public int doLogic() throws IOException {
IndexReader reader = getRunData().getIndexReader();
if (userData != null) {
Map map = new HashMap();
Map<String,String> map = new HashMap<String,String>();
map.put(OpenReaderTask.USER_DATA, userData);
reader.flush(map);
} else {

View File

@@ -30,12 +30,12 @@ import java.lang.reflect.Constructor;
*
*/
public class NewAnalyzerTask extends PerfTask {
private List/*<String>*/ analyzerClassNames;
private List<String> analyzerClassNames;
private int current;
public NewAnalyzerTask(PerfRunData runData) {
super(runData);
analyzerClassNames = new ArrayList();
analyzerClassNames = new ArrayList<String>();
}
public static final Analyzer createAnalyzer(String className) throws Exception{
@@ -57,7 +57,7 @@ public class NewAnalyzerTask extends PerfTask {
{
current = 0;
}
className = (String) analyzerClassNames.get(current++);
className = analyzerClassNames.get(current++);
if (className == null || className.equals(""))
{
className = "org.apache.lucene.analysis.standard.StandardAnalyzer";

View File

@@ -19,7 +19,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -59,14 +58,12 @@ public class OpenReaderTask extends PerfTask {
public static IndexReader openCommitPoint(String userData, Directory dir, Config config, boolean readOnly) throws IOException {
IndexReader r = null;
Collection commits = IndexReader.listCommits(dir);
Iterator i = commits.iterator();
while (i.hasNext()) {
IndexCommit ic = (IndexCommit)i.next();
Map map = ic.getUserData();
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
for (final IndexCommit ic : commits) {
Map<String,String> map = ic.getUserData();
String ud = null;
if (map != null) {
ud = (String)map.get(USER_DATA);
ud = map.get(USER_DATA);
}
if (ud != null && ud.equals(userData)) {
IndexDeletionPolicy indexDeletionPolicy = CreateIndexTask.getIndexDeletionPolicy(config);
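
The openCommitPoint() hunk above is the canonical loop rewrite in this patch: an explicit Iterator plus a cast per element becomes an enhanced for statement, which the compiler desugars to the same iterator()/hasNext()/next() calls. A hedged sketch of the two forms (String stands in for IndexCommit):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class ForEachSketch {
  public static void main(String[] args) {
    List<String> commits = Arrays.asList("commit-1", "commit-2");

    // Old form: explicit iterator and a cast per element.
    for (Iterator it = commits.iterator(); it.hasNext();) {
      String ic = (String) it.next();
      System.out.println(ic);
    }

    // New form: the compiler supplies the iterator and the element type.
    for (final String ic : commits) {
      System.out.println(ic);
    }
  }
}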

View File

@@ -20,7 +20,7 @@ package org.apache.lucene.benchmark.byTask.tasks;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
@@ -128,9 +128,8 @@ public abstract class ReadTask extends PerfTask {
Document document = retrieveDoc(ir, id);
res += document != null ? 1 : 0;
if (numHighlight > 0 && m < numHighlight) {
Collection/*<String>*/ fieldsToHighlight = getFieldsToHighlight(document);
for (Iterator iterator = fieldsToHighlight.iterator(); iterator.hasNext();) {
String field = (String) iterator.next();
Collection<String> fieldsToHighlight = getFieldsToHighlight(document);
for (final String field : fieldsToHighlight) {
String text = document.get(field);
res += highlighter.doHighlight(ir, id, field, document, analyzer, text);
}
@@ -291,11 +290,10 @@ public abstract class ReadTask extends PerfTask {
* @param document The Document
* @return A Collection of Field names (Strings)
*/
protected Collection/*<String>*/ getFieldsToHighlight(Document document) {
List/*<Fieldable>*/ fieldables = document.getFields();
Set/*<String>*/ result = new HashSet(fieldables.size());
for (Iterator iterator = fieldables.iterator(); iterator.hasNext();) {
Fieldable fieldable = (Fieldable) iterator.next();
protected Collection<String> getFieldsToHighlight(Document document) {
List<Fieldable> fieldables = document.getFields();
Set<String> result = new HashSet<String>(fieldables.size());
for (final Fieldable fieldable : fieldables) {
result.add(fieldable.name());
}
return result;

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
/**
* Simple task to test performance of tokenizers. It just
@@ -59,12 +59,10 @@ public class ReadTokensTask extends PerfTask {
}
public int doLogic() throws Exception {
List fields = doc.getFields();
final int numField = fields.size();
List<Fieldable> fields = doc.getFields();
Analyzer analyzer = getRunData().getAnalyzer();
int tokenCount = 0;
for(int i=0;i<numField;i++) {
final Field field = (Field) fields.get(i);
for(final Fieldable field : fields) {
final TokenStream stream;
final TokenStream streamValue = field.tokenStreamValue();

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -48,16 +47,14 @@ public class RepAllTask extends ReportTask {
* Report detailed statistics as a string
* @return the report
*/
protected Report reportAll(List taskStats) {
String longestOp = longestOp(taskStats.iterator());
protected Report reportAll(List<TaskStats> taskStats) {
String longestOp = longestOp(taskStats);
boolean first = true;
StringBuffer sb = new StringBuffer();
sb.append(tableTitle(longestOp));
sb.append(newline);
int reported = 0;
Iterator it = taskStats.iterator();
while (it.hasNext()) {
TaskStats stat = (TaskStats) it.next();
for (final TaskStats stat : taskStats) {
if (stat.getElapsed()>=0) { // consider only tasks that ended
if (!first) {
sb.append(newline);

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -46,15 +45,14 @@ public class RepSelectByPrefTask extends RepSumByPrefTask {
return 0;
}
protected Report reportSelectByPrefix(List taskStats) {
String longestOp = longestOp(taskStats.iterator());
protected Report reportSelectByPrefix(List<TaskStats> taskStats) {
String longestOp = longestOp(taskStats);
boolean first = true;
StringBuffer sb = new StringBuffer();
sb.append(tableTitle(longestOp));
sb.append(newline);
int reported = 0;
for (Iterator it = taskStats.iterator(); it.hasNext();) {
TaskStats stat = (TaskStats) it.next();
for (final TaskStats stat : taskStats) {
if (stat.getElapsed()>=0 && stat.getTask().getName().startsWith(prefix)) { // only ended tasks with proper name
reported++;
if (!first) {

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -51,17 +50,16 @@ public class RepSumByNameRoundTask extends ReportTask {
* Report statistics as a string, aggregate for tasks named the same, and from the same round.
* @return the report
*/
protected Report reportSumByNameRound(List taskStats) {
protected Report reportSumByNameRound(List<TaskStats> taskStats) {
// aggregate by task name and round
LinkedHashMap p2 = new LinkedHashMap();
LinkedHashMap<String,TaskStats> p2 = new LinkedHashMap<String,TaskStats>();
int reported = 0;
for (Iterator it = taskStats.iterator(); it.hasNext();) {
TaskStats stat1 = (TaskStats) it.next();
for (final TaskStats stat1 : taskStats) {
if (stat1.getElapsed()>=0) { // consider only tasks that ended
reported++;
String name = stat1.getTask().getName();
String rname = stat1.getRound()+"."+name; // group by round
TaskStats stat2 = (TaskStats) p2.get(rname);
TaskStats stat2 = p2.get(rname);
if (stat2 == null) {
try {
stat2 = (TaskStats) stat1.clone();

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -51,16 +50,15 @@ public class RepSumByNameTask extends ReportTask {
* Report statistics as a string, aggregate for tasks named the same.
* @return the report
*/
protected Report reportSumByName(List taskStats) {
protected Report reportSumByName(List<TaskStats> taskStats) {
// aggregate by task name
int reported = 0;
LinkedHashMap p2 = new LinkedHashMap();
for (Iterator it = taskStats.iterator(); it.hasNext();) {
TaskStats stat1 = (TaskStats) it.next();
LinkedHashMap<String,TaskStats> p2 = new LinkedHashMap<String,TaskStats>();
for (final TaskStats stat1: taskStats) {
if (stat1.getElapsed()>=0) { // consider only tasks that ended
reported++;
String name = stat1.getTask().getName();
TaskStats stat2 = (TaskStats) p2.get(name);
TaskStats stat2 = p2.get(name);
if (stat2 == null) {
try {
stat2 = (TaskStats) stat1.clone();

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -47,17 +46,16 @@ public class RepSumByPrefRoundTask extends RepSumByPrefTask {
return 0;
}
protected Report reportSumByPrefixRound(List taskStats) {
protected Report reportSumByPrefixRound(List<TaskStats> taskStats) {
// aggregate by task name and by round
int reported = 0;
LinkedHashMap p2 = new LinkedHashMap();
for (Iterator it = taskStats.iterator(); it.hasNext();) {
TaskStats stat1 = (TaskStats) it.next();
LinkedHashMap<String,TaskStats> p2 = new LinkedHashMap<String,TaskStats>();
for (final TaskStats stat1 : taskStats) {
if (stat1.getElapsed()>=0 && stat1.getTask().getName().startsWith(prefix)) { // only ended tasks with proper name
reported++;
String name = stat1.getTask().getName();
String rname = stat1.getRound()+"."+name; // group by round
TaskStats stat2 = (TaskStats) p2.get(rname);
TaskStats stat2 = p2.get(rname);
if (stat2 == null) {
try {
stat2 = (TaskStats) stat1.clone();

View File

@@ -21,7 +21,6 @@ import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.stats.Report;
import org.apache.lucene.benchmark.byTask.stats.TaskStats;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -49,16 +48,15 @@ public class RepSumByPrefTask extends ReportTask {
return 0;
}
protected Report reportSumByPrefix (List taskStats) {
protected Report reportSumByPrefix (List<TaskStats> taskStats) {
// aggregate by task name
int reported = 0;
LinkedHashMap p2 = new LinkedHashMap();
for (Iterator it = taskStats.iterator(); it.hasNext();) {
TaskStats stat1 = (TaskStats) it.next();
LinkedHashMap<String,TaskStats> p2 = new LinkedHashMap<String,TaskStats>();
for (final TaskStats stat1 : taskStats) {
if (stat1.getElapsed()>=0 && stat1.getTask().getName().startsWith(prefix)) { // only ended tasks with proper name
reported++;
String name = stat1.getTask().getName();
TaskStats stat2 = (TaskStats) p2.get(name);
TaskStats stat2 = p2.get(name);
if (stat2 == null) {
try {
stat2 = (TaskStats) stat1.clone();

View File

@@ -1,6 +1,5 @@
package org.apache.lucene.benchmark.byTask.tasks;
import java.util.Iterator;
import java.util.LinkedHashMap;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -96,10 +95,9 @@ public abstract class ReportTask extends PerfTask {
* @param taskStats completed tasks to be considered.
* @return the longest op name out of completed tasks.
*/
protected String longestOp(Iterator taskStats) {
protected String longestOp(Iterable<TaskStats> taskStats) {
String longest = OP;
while (taskStats.hasNext()) {
TaskStats stat = (TaskStats) taskStats.next();
for (final TaskStats stat : taskStats) {
if (stat.getElapsed()>=0) { // consider only tasks that ended
String name = stat.getTask().getName();
if (name.length() > longest.length()) {
@@ -133,15 +131,14 @@ public abstract class ReportTask extends PerfTask {
return sb.toString();
}
protected Report genPartialReport(int reported, LinkedHashMap partOfTasks, int totalSize) {
String longetOp = longestOp(partOfTasks.values().iterator());
protected Report genPartialReport(int reported, LinkedHashMap<String,TaskStats> partOfTasks, int totalSize) {
String longetOp = longestOp(partOfTasks.values());
boolean first = true;
StringBuffer sb = new StringBuffer();
sb.append(tableTitle(longetOp));
sb.append(newline);
int lineNum = 0;
for (Iterator it = partOfTasks.values().iterator(); it.hasNext();) {
TaskStats stat = (TaskStats) it.next();
for (final TaskStats stat : partOfTasks.values()) {
if (!first) {
sb.append(newline);
}

View File

@@ -65,7 +65,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
protected int numToHighlight = Integer.MAX_VALUE;
protected boolean mergeContiguous;
protected int maxFrags = 2;
protected Set paramFields = Collections.EMPTY_SET;
protected Set<String> paramFields = Collections.emptySet();
protected Highlighter highlighter;
protected int maxDocCharsToAnalyze;
@@ -104,8 +104,8 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
};
}
protected Collection/*<String>*/ getFieldsToHighlight(Document document) {
Collection result = super.getFieldsToHighlight(document);
protected Collection<String> getFieldsToHighlight(Document document) {
Collection<String> result = super.getFieldsToHighlight(document);
//if stored is false, then result will be empty, in which case just get all the param fields
if (paramFields.isEmpty() == false && result.isEmpty() == false) {
result.retainAll(paramFields);
@@ -127,7 +127,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
} else if (splits[i].startsWith("mergeContiguous[") == true){
mergeContiguous = Boolean.valueOf(splits[i].substring("mergeContiguous[".length(),splits[i].length() - 1)).booleanValue();
} else if (splits[i].startsWith("fields[") == true){
paramFields = new HashSet();
paramFields = new HashSet<String>();
String fieldNames = splits[i].substring("fields[".length(), splits[i].length() - 1);
String [] fieldSplits = fieldNames.split(";");
for (int j = 0; j < fieldSplits.length; j++) {

View File

@@ -58,12 +58,12 @@ public class SearchTravRetLoadFieldSelectorTask extends SearchTravTask {
public void setParams(String params) {
this.params = params; // cannot just call super.setParams(), b/c its params differ.
Set fieldsToLoad = new HashSet();
Set<String> fieldsToLoad = new HashSet<String>();
for (StringTokenizer tokenizer = new StringTokenizer(params, ","); tokenizer.hasMoreTokens();) {
String s = tokenizer.nextToken();
fieldsToLoad.add(s);
}
fieldSelector = new SetBasedFieldSelector(fieldsToLoad, Collections.EMPTY_SET);
fieldSelector = new SetBasedFieldSelector(fieldsToLoad, Collections.<String> emptySet());
}
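
The Collections.EMPTY_SET to Collections.<String>emptySet() change above deserves a note: EMPTY_SET is a raw Set, while emptySet() is a generic method whose type argument is normally inferred from the assignment target; in argument position (as in the SetBasedFieldSelector call) Java 5 inference cannot supply it, hence the explicit <String> witness. A minimal sketch:

import java.util.Collections;
import java.util.Set;

public class EmptySetSketch {
  static void use(Set<String> fields) {
    System.out.println(fields.size());
  }

  public static void main(String[] args) {
    // Raw constant: compiles, but the assignment is unchecked.
    @SuppressWarnings("unchecked")
    Set<String> a = Collections.EMPTY_SET;

    // Generic method: the type argument is inferred from the target.
    Set<String> b = Collections.emptySet();

    // Argument position: Java 5 cannot infer here, hence the witness.
    use(Collections.<String>emptySet());

    System.out.println(a.size() + b.size());
  }
}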

View File

@@ -59,7 +59,7 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask {
protected int numToHighlight = Integer.MAX_VALUE;
protected int maxFrags = 2;
protected int fragSize = 100;
protected Set paramFields = Collections.EMPTY_SET;
protected Set<String> paramFields = Collections.emptySet();
protected FastVectorHighlighter highlighter;
public SearchTravRetVectorHighlightTask(PerfRunData runData) {
@@ -101,8 +101,8 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask {
};
}
protected Collection/*<String>*/ getFieldsToHighlight(Document document) {
Collection result = super.getFieldsToHighlight(document);
protected Collection<String> getFieldsToHighlight(Document document) {
Collection<String> result = super.getFieldsToHighlight(document);
//if stored is false, then result will be empty, in which case just get all the param fields
if (paramFields.isEmpty() == false && result.isEmpty() == false) {
result.retainAll(paramFields);
@@ -124,7 +124,7 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask {
} else if (splits[i].startsWith("fragSize[") == true){
fragSize = (int)Float.parseFloat(splits[i].substring("fragSize[".length(),splits[i].length() - 1));
} else if (splits[i].startsWith("fields[") == true){
paramFields = new HashSet();
paramFields = new HashSet<String>();
String fieldNames = splits[i].substring("fields[".length(), splits[i].length() - 1);
String [] fieldSplits = fieldNames.split(";");
for (int j = 0; j < fieldSplits.length; j++) {

View File

@@ -18,7 +18,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
*/
import java.util.ArrayList;
import java.util.Iterator;
import java.text.NumberFormat;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -29,7 +28,7 @@ import org.apache.lucene.benchmark.byTask.feeds.NoMoreDataException;
*/
public class TaskSequence extends PerfTask {
public static int REPEAT_EXHAUST = -2;
private ArrayList tasks;
private ArrayList<PerfTask> tasks;
private int repetitions = 1;
private boolean parallel;
private TaskSequence parent;
@@ -54,7 +53,7 @@ public class TaskSequence extends PerfTask {
setSequenceName();
this.parent = parent;
this.parallel = parallel;
tasks = new ArrayList();
tasks = new ArrayList<PerfTask>();
}
public void close() throws Exception {
@@ -70,7 +69,7 @@ public class TaskSequence extends PerfTask {
final int numTasks = tasks.size();
tasksArray = new PerfTask[numTasks];
for(int k=0;k<numTasks;k++) {
tasksArray[k] = (PerfTask) tasks.get(k);
tasksArray[k] = tasks.get(k);
anyExhaustibleTasks |= tasksArray[k] instanceof ResetInputsTask;
anyExhaustibleTasks |= tasksArray[k] instanceof TaskSequence;
}
@@ -279,8 +278,7 @@ public class TaskSequence extends PerfTask {
StringBuffer sb = new StringBuffer(super.toString());
sb.append(parallel ? " [" : " {");
sb.append(NEW_LINE);
for (Iterator it = tasks.iterator(); it.hasNext();) {
PerfTask task = (PerfTask) it.next();
for (final PerfTask task : tasks) {
sb.append(task.toString());
sb.append(NEW_LINE);
}
@@ -304,8 +302,7 @@ public class TaskSequence extends PerfTask {
*/
public void setNoChildReport() {
letChildReport = false;
for (Iterator it = tasks.iterator(); it.hasNext();) {
PerfTask task = (PerfTask) it.next();
for (final PerfTask task : tasks) {
if (task instanceof TaskSequence) {
((TaskSequence)task).setNoChildReport();
}
@@ -352,7 +349,7 @@ public class TaskSequence extends PerfTask {
/**
* @return Returns the tasks.
*/
public ArrayList getTasks() {
public ArrayList<PerfTask> getTasks() {
return tasks;
}
@@ -361,9 +358,9 @@ public class TaskSequence extends PerfTask {
*/
protected Object clone() throws CloneNotSupportedException {
TaskSequence res = (TaskSequence) super.clone();
res.tasks = new ArrayList();
res.tasks = new ArrayList<PerfTask>();
for (int i = 0; i < tasks.size(); i++) {
res.tasks.add(((PerfTask)tasks.get(i)).clone());
res.tasks.add((PerfTask)tasks.get(i).clone());
}
return res;
}
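
One subtlety in the clone() hunk just above: once tasks is an ArrayList<PerfTask>, the get(i) call no longer needs a cast, but Object.clone() still returns Object, so a single cast on the result remains; that is exactly why the parentheses moved. A hedged sketch of the same shape (Task is an illustrative stand-in for PerfTask):

import java.util.ArrayList;

public class CloneListSketch {
  static class Task implements Cloneable {
    public Object clone() throws CloneNotSupportedException {
      return super.clone();
    }
  }

  public static void main(String[] args) throws CloneNotSupportedException {
    ArrayList<Task> tasks = new ArrayList<Task>();
    tasks.add(new Task());

    ArrayList<Task> copy = new ArrayList<Task>();
    for (int i = 0; i < tasks.size(); i++) {
      // get(i) is already a Task; the cast only narrows clone()'s Object result.
      copy.add((Task) tasks.get(i).clone());
    }
    System.out.println(copy.size()); // 1
  }
}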

View File

@@ -213,9 +213,9 @@ public class Algorithm {
// remove redundant top level enclosing sequences
while (sequence.isCollapsable() && sequence.getRepetitions()==1 && sequence.getRate()==0) {
ArrayList t = sequence.getTasks();
ArrayList<PerfTask> t = sequence.getTasks();
if (t!=null && t.size()==1) {
PerfTask p = (PerfTask) t.get(0);
PerfTask p = t.get(0);
if (p instanceof TaskSequence) {
sequence = (TaskSequence) p;
continue;
@@ -252,20 +252,19 @@
* Expert: for test purposes, return all tasks participating in this algorithm.
* @return all tasks participating in this algorithm.
*/
public ArrayList extractTasks() {
ArrayList res = new ArrayList();
public ArrayList<PerfTask> extractTasks() {
ArrayList<PerfTask> res = new ArrayList<PerfTask>();
extractTasks(res, sequence);
return res;
}
private void extractTasks (ArrayList extrct, TaskSequence seq) {
private void extractTasks (ArrayList<PerfTask> extrct, TaskSequence seq) {
if (seq==null)
return;
extrct.add(seq);
ArrayList t = sequence.getTasks();
ArrayList<PerfTask> t = sequence.getTasks();
if (t==null)
return;
for (int i = 0; i < t.size(); i++) {
PerfTask p = (PerfTask) t.get(0);
for (final PerfTask p : t) {
if (p instanceof TaskSequence) {
extractTasks(extrct, (TaskSequence)p);
} else {

View File

@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.StringTokenizer;
@@ -50,7 +51,7 @@ public class Config {
private int roundNumber = 0;
private Properties props;
private HashMap valByRound = new HashMap();
private HashMap colForValByRound = new HashMap();
private HashMap<String,String> colForValByRound = new HashMap<String,String>();
private String algorithmText;
/**
@@ -60,7 +61,7 @@
*/
public Config (Reader algReader) throws IOException {
// read alg file to array of lines
ArrayList lines = new ArrayList();
ArrayList<String> lines = new ArrayList<String>();
BufferedReader r = new BufferedReader(algReader);
int lastConfigLine=0;
for (String line = r.readLine(); line!=null; line=r.readLine()) {
@@ -112,10 +113,9 @@
private void printProps() {
System.out.println("------------> config properties:");
List propKeys = new ArrayList(props.keySet());
List<String> propKeys = new ArrayList(props.keySet());
Collections.sort(propKeys);
for (Iterator it = propKeys.iterator(); it.hasNext();) {
String propName = (String) it.next();
for (final String propName : propKeys) {
System.out.println(propName + " = " + props.getProperty(propName));
}
System.out.println("-------------------------------");
@@ -283,7 +283,7 @@ public class Config {
return new int [] { Integer.parseInt(s) };
}
ArrayList a = new ArrayList();
ArrayList<Integer> a = new ArrayList<Integer>();
StringTokenizer st = new StringTokenizer(s,":");
while (st.hasMoreTokens()) {
String t = st.nextToken();
@@ -291,7 +291,7 @@
}
int res[] = new int[a.size()];
for (int i=0; i<a.size(); i++) {
res[i] = ((Integer) a.get(i)).intValue();
res[i] = a.get(i).intValue();
}
return res;
}
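
Note what the hunks above do not change: the explicit intValue() unwrapping stays (these helpers predate reliance on autoboxing); only the (Integer) cast disappears once the list is an ArrayList<Integer>. A self-contained sketch of that helper shape (toIntArray is an illustrative name, not Config's API):

import java.util.ArrayList;
import java.util.StringTokenizer;

public class IntListSketch {
  // Parses a colon-separated list such as "10:100:1000" into an int[].
  static int[] toIntArray(String s) {
    ArrayList<Integer> a = new ArrayList<Integer>();
    StringTokenizer st = new StringTokenizer(s, ":");
    while (st.hasMoreTokens()) {
      a.add(Integer.valueOf(st.nextToken()));
    }
    int res[] = new int[a.size()];
    for (int i = 0; i < a.size(); i++) {
      res[i] = a.get(i).intValue(); // typed get(): no (Integer) cast
    }
    return res;
  }

  public static void main(String[] args) {
    System.out.println(toIntArray("10:100:1000").length); // 3
  }
}
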
@@ -302,7 +302,7 @@ public class Config {
return new double [] { Double.parseDouble(s) };
}
ArrayList a = new ArrayList();
ArrayList<Double> a = new ArrayList<Double>();
StringTokenizer st = new StringTokenizer(s,":");
while (st.hasMoreTokens()) {
String t = st.nextToken();
@@ -310,7 +310,7 @@
}
double res[] = new double[a.size()];
for (int i=0; i<a.size(); i++) {
res[i] = ((Double) a.get(i)).doubleValue();
res[i] = a.get(i).doubleValue();
}
return res;
}
@@ -321,7 +321,7 @@ public class Config {
return new boolean [] { Boolean.valueOf(s).booleanValue() };
}
ArrayList a = new ArrayList();
ArrayList<Boolean> a = new ArrayList<Boolean>();
StringTokenizer st = new StringTokenizer(s,":");
while (st.hasMoreTokens()) {
String t = st.nextToken();
@@ -329,7 +329,7 @@
}
boolean res[] = new boolean[a.size()];
for (int i=0; i<a.size(); i++) {
res[i] = ((Boolean) a.get(i)).booleanValue();
res[i] = a.get(i).booleanValue();
}
return res;
}
@@ -342,9 +342,8 @@ public class Config {
return "";
}
StringBuffer sb = new StringBuffer();
for (Iterator it = colForValByRound.keySet().iterator(); it.hasNext();) {
String name = (String) it.next();
String colName = (String) colForValByRound.get(name);
for (final String name : colForValByRound.keySet()) {
String colName = colForValByRound.get(name);
sb.append(" ").append(colName);
}
return sb.toString();
@@ -358,9 +357,8 @@ public class Config {
return "";
}
StringBuffer sb = new StringBuffer();
for (Iterator it = colForValByRound.keySet().iterator(); it.hasNext();) {
String name = (String) it.next();
String colName = (String) colForValByRound.get(name);
for (final String name : colForValByRound.keySet()) {
String colName = colForValByRound.get(name);
String template = " "+colName;
if (roundNum<0) {
// just append blanks

View File

@@ -34,14 +34,14 @@ import java.util.Map;
*/
public class QualityQuery implements Comparable<QualityQuery> {
private String queryID;
private Map nameValPairs;
private Map<String,String> nameValPairs;
/**
* Create a QualityQuery with given ID and name-value pairs.
* @param queryID ID of this quality query.
* @param nameValPairs the contents of this quality query.
*/
public QualityQuery(String queryID, Map nameValPairs) {
public QualityQuery(String queryID, Map<String,String> nameValPairs) {
this.queryID = queryID;
this.nameValPairs = nameValPairs;
}
@@ -50,7 +50,7 @@ public class QualityQuery implements Comparable<QualityQuery> {
* Return all the names of name-value-pairs in this QualityQuery.
*/
public String[] getNames() {
return (String[]) nameValPairs.keySet().toArray(new String[0]);
return nameValPairs.keySet().toArray(new String[0]);
}
/**
@@ -58,7 +58,7 @@ public class QualityQuery implements Comparable<QualityQuery> {
* @param name the name whose value should be returned.
*/
public String getValue(String name) {
return (String) nameValPairs.get(name);
return nameValPairs.get(name);
}
/**

View File

@@ -58,7 +58,7 @@ public class QualityStats {
}
}
private ArrayList recallPoints;
private ArrayList<RecallPoint> recallPoints;
/**
* Construct a QualityStats object with anticipated maximal number of relevant hits.
@@ -67,7 +67,7 @@ public class QualityStats {
public QualityStats(double maxGoodPoints, long searchTime) {
this.maxGoodPoints = maxGoodPoints;
this.searchTime = searchTime;
this.recallPoints = new ArrayList();
this.recallPoints = new ArrayList<RecallPoint>();
pAt = new double[MAX_POINTS+1]; // pAt[0] unused.
}
@@ -265,7 +265,7 @@ public class QualityStats {
* Returns the recallPoints.
*/
public RecallPoint [] getRecallPoints() {
return (RecallPoint[]) recallPoints.toArray(new RecallPoint[0]);
return recallPoints.toArray(new RecallPoint[0]);
}
/**

View File

@@ -55,7 +55,7 @@ public class Trec1MQReader {
* @throws IOException if cannot read the queries.
*/
public QualityQuery[] readQueries(BufferedReader reader) throws IOException {
ArrayList res = new ArrayList();
ArrayList<QualityQuery> res = new ArrayList<QualityQuery>();
String line;
try {
while (null!=(line=reader.readLine())) {
@@ -69,7 +69,7 @@ public class Trec1MQReader {
// qtext
String qtext = line.substring(k+1).trim();
// we got a topic!
HashMap fields = new HashMap();
HashMap<String,String> fields = new HashMap<String,String>();
fields.put(name,qtext);
//System.out.println("id: "+id+" qtext: "+qtext+" line: "+line);
QualityQuery topic = new QualityQuery(id,fields);
@@ -79,7 +79,7 @@ public class Trec1MQReader {
reader.close();
}
// sort result array (by ID)
QualityQuery qq[] = (QualityQuery[]) res.toArray(new QualityQuery[0]);
QualityQuery qq[] = res.toArray(new QualityQuery[0]);
Arrays.sort(qq);
return qq;
}
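
The dropped (QualityQuery[]) cast above is another erasure detail: on a raw list, toArray(new QualityQuery[0]) is typed as Object[], so the caller had to cast; on an ArrayList<QualityQuery> the array-accepting overload already returns QualityQuery[]. A hedged sketch with String standing in for QualityQuery:

import java.util.ArrayList;
import java.util.List;

public class ToArraySketch {
  public static void main(String[] args) {
    List raw = new ArrayList();
    raw.add("q1");
    // Raw list: the generic signature erases to Object[] toArray(Object[]).
    String[] a = (String[]) raw.toArray(new String[0]);

    List<String> typed = new ArrayList<String>();
    typed.add("q1");
    // Parameterized list: the same call is already typed String[].
    String[] b = typed.toArray(new String[0]);

    System.out.println(a[0] + " " + b[0]);
  }
}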

View File

@@ -21,7 +21,6 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.StringTokenizer;
import org.apache.lucene.benchmark.quality.Judge;
@@ -32,7 +31,7 @@ import org.apache.lucene.benchmark.quality.QualityQuery;
*/
public class TrecJudge implements Judge {
HashMap judgements;
HashMap<String,QRelJudgement> judgements;
/**
* Constructor from a reader.
@@ -50,7 +49,7 @@ public class TrecJudge implements Judge {
* @throws IOException
*/
public TrecJudge (BufferedReader reader) throws IOException {
judgements = new HashMap();
judgements = new HashMap<String,QRelJudgement>();
QRelJudgement curr = null;
String zero = "0";
String line;
@@ -69,7 +68,7 @@ public class TrecJudge implements Judge {
assert !st.hasMoreTokens() : "wrong format: "+line+" next: "+st.nextToken();
if (relevant) { // only keep relevant docs
if (curr==null || !curr.queryID.equals(queryID)) {
curr = (QRelJudgement)judgements.get(queryID);
curr = judgements.get(queryID);
if (curr==null) {
curr = new QRelJudgement(queryID);
judgements.put(queryID,curr);
@@ -85,18 +84,18 @@ public class TrecJudge implements Judge {
// inherit javadocs
public boolean isRelevant(String docName, QualityQuery query) {
QRelJudgement qrj = (QRelJudgement) judgements.get(query.getQueryID());
QRelJudgement qrj = judgements.get(query.getQueryID());
return qrj!=null && qrj.isRelevant(docName);
}
/** single Judgement of a trec quality query */
private static class QRelJudgement {
private String queryID;
private HashMap relevantDocs;
private HashMap<String,String> relevantDocs;
QRelJudgement(String queryID) {
this.queryID = queryID;
relevantDocs = new HashMap();
relevantDocs = new HashMap<String,String>();
}
public void addRelevandDoc(String docName) {
@@ -114,8 +113,8 @@ public class TrecJudge implements Judge {
// inherit javadocs
public boolean validateData(QualityQuery[] qq, PrintWriter logger) {
HashMap missingQueries = (HashMap) judgements.clone();
ArrayList missingJudgements = new ArrayList();
HashMap<String,QRelJudgement> missingQueries = (HashMap<String, QRelJudgement>) judgements.clone();
ArrayList<String> missingJudgements = new ArrayList<String>();
for (int i=0; i<qq.length; i++) {
String id = qq[i].getQueryID();
if (missingQueries.containsKey(id)) {
@@ -130,7 +129,7 @@ public class TrecJudge implements Judge {
if (logger!=null) {
logger.println("WARNING: "+missingJudgements.size()+" queries have no judgments! - ");
for (int i=0; i<missingJudgements.size(); i++) {
logger.println(" "+(String)missingJudgements.get(i));
logger.println(" "+ missingJudgements.get(i));
}
}
}
@@ -138,8 +137,7 @@ public class TrecJudge implements Judge {
isValid = false;
if (logger!=null) {
logger.println("WARNING: "+missingQueries.size()+" judgments match no query! - ");
for (Iterator it = missingQueries.keySet().iterator(); it.hasNext();) {
String id = (String) it.next();
for (final String id : missingQueries.keySet()) {
logger.println(" "+id);
}
}
@@ -149,7 +147,7 @@ public class TrecJudge implements Judge {
// inherit javadocs
public int maxRecall(QualityQuery query) {
QRelJudgement qrj = (QRelJudgement) judgements.get(query.getQueryID());
QRelJudgement qrj = judgements.get(query.getQueryID());
if (qrj!=null) {
return qrj.maxRecall();
}
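
validateData() above still needs a cast when cloning the judgements map: HashMap.clone() returns Object, and by erasure the cast to HashMap<String,QRelJudgement> cannot be verified at runtime, so it stays as an unchecked cast. A minimal sketch of the idiom, with Integer values standing in for QRelJudgement:

import java.util.HashMap;

public class CloneMapSketch {
  public static void main(String[] args) {
    HashMap<String, Integer> judgements = new HashMap<String, Integer>();
    judgements.put("q1", Integer.valueOf(1));

    // clone() returns Object; the parameterized cast is unchecked by erasure.
    @SuppressWarnings("unchecked")
    HashMap<String, Integer> missingQueries =
        (HashMap<String, Integer>) judgements.clone();

    missingQueries.remove("q1");
    System.out.println(missingQueries.size() + " " + judgements.size()); // 0 1
  }
}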

View File

@@ -62,11 +62,11 @@ public class TrecTopicsReader {
* @throws IOException if cannot read the queries.
*/
public QualityQuery[] readQueries(BufferedReader reader) throws IOException {
ArrayList res = new ArrayList();
ArrayList<QualityQuery> res = new ArrayList<QualityQuery>();
StringBuffer sb;
try {
while (null!=(sb=read(reader,"<top>",null,false,false))) {
HashMap fields = new HashMap();
HashMap<String,String> fields = new HashMap<String,String>();
// id
sb = read(reader,"<num>",null,true,false);
int k = sb.indexOf(":");
@@ -91,7 +91,7 @@ public class TrecTopicsReader {
reader.close();
}
// sort result array (by ID)
QualityQuery qq[] = (QualityQuery[]) res.toArray(new QualityQuery[0]);
QualityQuery qq[] = res.toArray(new QualityQuery[0]);
Arrays.sort(qq);
return qq;
}

View File

@@ -105,7 +105,7 @@ public class QualityQueriesFinder {
String res[] = new String[pq.size()];
int i = 0;
while (pq.size()>0) {
TermDf tdf = (TermDf) pq.pop();
TermDf tdf = pq.pop();
res[i++] = tdf.word;
System.out.println(i+". word: "+tdf.df+" "+tdf.word);
}

View File

@@ -32,7 +32,7 @@ public class SimpleQQParser implements QualityQueryParser {
private String qqName;
private String indexField;
ThreadLocal queryParser = new ThreadLocal();
ThreadLocal<QueryParser> queryParser = new ThreadLocal<QueryParser>();
/**
* Constructor of a simple qq parser.
@@ -48,7 +48,7 @@ public class SimpleQQParser implements QualityQueryParser {
* @see org.apache.lucene.benchmark.quality.QualityQueryParser#parse(org.apache.lucene.benchmark.quality.QualityQuery)
*/
public Query parse(QualityQuery qq) throws ParseException {
QueryParser qp = (QueryParser) queryParser.get();
QueryParser qp = queryParser.get();
if (qp==null) {
qp = new QueryParser(indexField, new StandardAnalyzer(Version.LUCENE_CURRENT));
queryParser.set(qp);
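
The ThreadLocal change above applies the same pattern at a different API: parameterizing the ThreadLocal types get() and set() once at the declaration. A hedged sketch of the lazy-initialize-on-get shape (SimpleDateFormat is only a stand-in for the per-thread QueryParser):

import java.text.SimpleDateFormat;
import java.util.Date;

public class ThreadLocalSketch {
  private static final ThreadLocal<SimpleDateFormat> FORMAT =
      new ThreadLocal<SimpleDateFormat>();

  static SimpleDateFormat format() {
    SimpleDateFormat f = FORMAT.get(); // typed: no cast needed
    if (f == null) {
      f = new SimpleDateFormat("yyyy-MM-dd");
      FORMAT.set(f);
    }
    return f;
  }

  public static void main(String[] args) {
    System.out.println(format().format(new Date()));
  }
}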

View File

@@ -47,7 +47,7 @@ public class QueryData {
* @return The QueryData
*/
public static QueryData[] getAll(Query[] queries) {
Vector vqd = new Vector();
Vector<QueryData> vqd = new Vector<QueryData>();
for (int i = 0; i < queries.length; i++) {
for (int r = 1; r >= 0; r--) {
for (int w = 1; w >= 0; w--) {
@@ -63,7 +63,7 @@ public class QueryData {
}
}
}
return (QueryData[])vqd.toArray(new QueryData[0]);
return vqd.toArray(new QueryData[0]);
}
/** Short legend for interpreting toString() output. */

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.benchmark.stats;
import java.io.File;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
@@ -53,7 +53,7 @@ public class TestData
/**
* List of results for each test run with these parameters.
*/
private Vector runData = new Vector();
private Vector<TestRunData> runData = new Vector<TestRunData>();
private int maxBufferedDocs, mergeFactor;
/**
* Directory containing source files.
@@ -132,7 +132,7 @@ public class TestData
{
return "# [NO RUN DATA]";
}
HashMap resByTask = new HashMap();
HashMap<String,LDCounter> resByTask = new HashMap<String,LDCounter>();
StringBuffer sb = new StringBuffer();
String lineSep = System.getProperty("line.separator");
sb.append("warm = Warm Index Reader").append(lineSep).append("srch = Search Index").append(lineSep).append("trav = Traverse Hits list, optionally retrieving document").append(lineSep).append(lineSep);
@@ -140,20 +140,17 @@ public class TestData
sb.append(COLS[i]);
}
sb.append("\n");
LinkedHashMap mapMem = new LinkedHashMap();
LinkedHashMap mapSpeed = new LinkedHashMap();
LinkedHashMap<String,TestData.LCounter[]> mapMem = new LinkedHashMap<String,TestData.LCounter[]>();
LinkedHashMap<String,DCounter> mapSpeed = new LinkedHashMap<String,DCounter>();
for (int i = 0; i < runData.size(); i++)
{
TestRunData trd = (TestRunData) runData.get(i);
Collection labels = trd.getLabels();
Iterator it = labels.iterator();
while (it.hasNext())
TestRunData trd = runData.get(i);
for (final String label : trd.getLabels())
{
String label = (String) it.next();
MemUsage mem = trd.getMemUsage(label);
if (mem != null)
{
TestData.LCounter[] tm = (TestData.LCounter[]) mapMem.get(label);
TestData.LCounter[] tm = mapMem.get(label);
if (tm == null)
{
tm = new TestData.LCounter[2];
@@ -169,7 +166,7 @@ public class TestData
TimeData td = trd.getTotals(label);
if (td != null)
{
TestData.DCounter dc = (TestData.DCounter) mapSpeed.get(label);
TestData.DCounter dc = mapSpeed.get(label);
if (dc == null)
{
dc = new TestData.DCounter();
@@ -182,12 +179,12 @@ public class TestData
}
}
}
LinkedHashMap res = new LinkedHashMap();
Iterator it = mapSpeed.keySet().iterator();
LinkedHashMap<String,String> res = new LinkedHashMap<String,String>();
Iterator<String> it = mapSpeed.keySet().iterator();
while (it.hasNext())
{
String label = (String) it.next();
TestData.DCounter dc = (TestData.DCounter) mapSpeed.get(label);
String label = it.next();
TestData.DCounter dc = mapSpeed.get(label);
res.put(label,
format(dc.count, RUNCNT) +
format(dc.recordCount / dc.count, RECCNT) +
@@ -197,7 +194,7 @@ public class TestData
// also sum by task
String task = label.substring(label.lastIndexOf("-")+1);
LDCounter ldc = (LDCounter) resByTask.get(task);
LDCounter ldc = resByTask.get(task);
if (ldc==null) {
ldc = new LDCounter();
resByTask.put(task,ldc);
@@ -209,9 +206,9 @@ public class TestData
it = mapMem.keySet().iterator();
while (it.hasNext())
{
String label = (String) it.next();
TestData.LCounter[] lc = (TestData.LCounter[]) mapMem.get(label);
String speed = (String) res.get(label);
String label = it.next();
TestData.LCounter[] lc = mapMem.get(label);
String speed = res.get(label);
boolean makeSpeed = false;
if (speed == null)
{
@@ -227,7 +224,7 @@ public class TestData
// also sum by task
String task = label.substring(label.lastIndexOf("-")+1);
LDCounter ldc = (LDCounter) resByTask.get(task);
LDCounter ldc = resByTask.get(task);
if (ldc==null) {
ldc = new LDCounter();
resByTask.put(task,ldc);
@@ -244,7 +241,7 @@ public class TestData
it = res.keySet().iterator();
while (it.hasNext())
{
String label = (String) it.next();
String label = it.next();
sb.append(format(prefix, ID));
sb.append(format(label, OP));
sb.append(res.get(label)).append("\n");
@@ -258,8 +255,8 @@ public class TestData
it = resByTask.keySet().iterator();
while (it.hasNext())
{
String task = (String) it.next();
LDCounter ldc = (LDCounter) resByTask.get(task);
String task = it.next();
LDCounter ldc = resByTask.get(task);
sb.append(format(" ", ID));
sb.append(format(task, OP));
sb.append(format(ldc.Dcount, RUNCNT));
@@ -309,7 +306,7 @@ public class TestData
*/
public static TestData[] getAll(File[] sources, Analyzer[] analyzers)
{
List res = new ArrayList(50);
List<TestData> res = new ArrayList<TestData>(50);
TestData ref = new TestData();
for (int q = 0; q < analyzers.length; q++)
{
@@ -332,7 +329,7 @@ public class TestData
ref.optimize = Constants.BOOLEANS[p].booleanValue();
try
{
res.add(ref.clone());
res.add((TestData)ref.clone());
}
catch (Exception e)
{
@@ -344,7 +341,7 @@ public class TestData
}
}
}
return (TestData[]) res.toArray(new TestData[0]);
return res.toArray(new TestData[0]);
}
/**
@@ -358,7 +355,7 @@ public class TestData
*/
public static TestData[] getTestDataMinMaxMergeAndMaxBuffered(File[] sources, Analyzer[] analyzers)
{
List res = new ArrayList(50);
List<TestData> res = new ArrayList<TestData>(50);
TestData ref = new TestData();
for (int q = 0; q < analyzers.length; q++)
{
@@ -373,7 +370,7 @@ public class TestData
ref.optimize = true;
try
{
res.add(ref.clone());
res.add((TestData)ref.clone());
}
catch (Exception e)
{
@@ -388,7 +385,7 @@ public class TestData
ref.optimize = true;
try
{
res.add(ref.clone());
res.add((TestData)ref.clone());
}
catch (Exception e)
{
@@ -403,7 +400,7 @@ public class TestData
ref.optimize = true;
try
{
res.add(ref.clone());
res.add((TestData)ref.clone());
}
catch (Exception e)
{
@@ -418,7 +415,7 @@ public class TestData
ref.optimize = true;
try
{
res.add(ref.clone());
res.add((TestData)ref.clone());
}
catch (Exception e)
{
@@ -426,7 +423,7 @@ public class TestData
}
}
}
return (TestData[]) res.toArray(new TestData[0]);
return res.toArray(new TestData[0]);
}
protected Object clone()
@ -553,12 +550,12 @@ public class TestData
this.queries = queries;
}
public Vector getRunData()
public Vector<TestRunData> getRunData()
{
return runData;
}
public void setRunData(Vector runData)
public void setRunData(Vector<TestRunData> runData)
{
this.runData = runData;
}

View File

@@ -33,7 +33,7 @@ public class TestRunData {
/** Start and end time of this test run. */
private long start = 0L, end = 0L;
private LinkedHashMap data = new LinkedHashMap();
private LinkedHashMap<String,Vector<TimeData>> data = new LinkedHashMap<String,Vector<TimeData>>();
public TestRunData() {}
@@ -41,7 +41,7 @@ public class TestRunData {
this.id = id;
}
public LinkedHashMap getData()
public LinkedHashMap<String,Vector<TimeData>> getData()
{
return data;
}
@@ -79,29 +79,29 @@ public class TestRunData {
/** Add a data point. */
public void addData(TimeData td) {
td.recordMemUsage();
Vector v = (Vector) data.get(td.name);
Vector<TimeData> v = data.get(td.name);
if (v == null) {
v = new Vector();
v = new Vector<TimeData>();
data.put(td.name, v);
}
v.add(td.clone());
v.add((TimeData)td.clone());
}
/** Get a list of all available types of data points. */
public Collection getLabels() {
public Collection<String> getLabels() {
return data.keySet();
}
/** Get total values from all data points of a given type. */
public TimeData getTotals(String label) {
Vector v = (Vector) data.get(label);
Vector<TimeData> v = data.get(label);
if (v == null)
{
return null;
}
TimeData res = new TimeData("TOTAL " + label);
for (int i = 0; i < v.size(); i++) {
TimeData td = (TimeData) v.get(i);
TimeData td = v.get(i);
res.count += td.count;
res.elapsed += td.elapsed;
}
@@ -111,12 +111,12 @@ public class TestRunData {
/** Get total values from all data points of all types.
* @return a list of TimeData values for all types.
*/
public Vector getTotals() {
Collection labels = getLabels();
Vector v = new Vector();
Iterator it = labels.iterator();
public Vector<TimeData> getTotals() {
Collection<String> labels = getLabels();
Vector<TimeData> v = new Vector<TimeData>();
Iterator<String> it = labels.iterator();
while (it.hasNext()) {
TimeData td = getTotals((String) it.next());
TimeData td = getTotals(it.next());
v.add(td);
}
return v;
@@ -124,7 +124,7 @@ public class TestRunData {
/** Get memory usage stats for a given data type. */
public MemUsage getMemUsage(String label) {
Vector v = (Vector) data.get(label);
Vector<TimeData> v = data.get(label);
if (v == null)
{
return null;
@@ -134,7 +134,7 @@ public class TestRunData {
res.minTotal = Long.MAX_VALUE;
long avgFree = 0L, avgTotal = 0L;
for (int i = 0; i < v.size(); i++) {
TimeData td = (TimeData) v.get(i);
TimeData td = v.get(i);
if (res.maxFree < td.freeMem)
{
res.maxFree = td.freeMem;
@@ -162,10 +162,7 @@ public class TestRunData {
/** Return a string representation. */
public String toString() {
StringBuffer sb = new StringBuffer();
Collection labels = getLabels();
Iterator it = labels.iterator();
while (it.hasNext()) {
String label = (String) it.next();
for (final String label : getLabels()) {
sb.append(id).append("-").append(label).append(" ").append(getTotals(label).toString(false)).append(" ");
sb.append(getMemUsage(label).toScaledString(1024 * 1024, "MB")).append("\n");
}

View File

@@ -20,12 +20,18 @@ package org.apache.lucene.benchmark.utils;
import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;
public class NoDeletionPolicy implements IndexDeletionPolicy {
public void onCommit(List commits) throws IOException {
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
// TODO Auto-generated method stub
}
public void onInit(List commits) throws IOException {
public void onInit(List<? extends IndexCommit> commits) throws IOException {
// TODO Auto-generated method stub
}
}
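
The onCommit/onInit signatures above use List<? extends IndexCommit> rather than List<IndexCommit>: the bounded wildcard lets index code pass a list of any IndexCommit subtype without copying. A minimal sketch of why that matters (Commit and RichCommit are illustrative stand-ins):

import java.util.ArrayList;
import java.util.List;

public class WildcardSketch {
  static class Commit {}
  static class RichCommit extends Commit {}

  // Accepts List<Commit>, List<RichCommit>, etc.; a plain List<Commit>
  // parameter would reject a List<RichCommit> outright.
  static void onCommit(List<? extends Commit> commits) {
    for (final Commit c : commits) {
      System.out.println(c);
    }
  }

  public static void main(String[] args) {
    List<RichCommit> commits = new ArrayList<RichCommit>();
    commits.add(new RichCommit());
    onCommit(commits);
  }
}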

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.standard;
* limitations under the License.
*/
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

View File

@@ -136,7 +136,7 @@ class BufferedDeletes {
if (queries.size() > 0) {
newDeleteQueries = new HashMap<Query, Integer>(queries.size());
for(Entry<Query,Integer> entry: queries.entrySet()) {
Integer num = (Integer) entry.getValue();
Integer num = entry.getValue();
newDeleteQueries.put(entry.getKey(),
Integer.valueOf(mapper.remap(num.intValue())));
}

View File

@@ -129,7 +129,7 @@ class CompoundFileReader extends Directory {
if (stream == null)
throw new IOException("Stream closed");
FileEntry entry = (FileEntry) entries.get(id);
FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found");
@@ -139,7 +139,7 @@ class CompoundFileReader extends Directory {
/** Returns an array of strings, one for each file in the directory. */
public String[] listAll() {
String res[] = new String[entries.size()];
return (String[]) entries.keySet().toArray(res);
return entries.keySet().toArray(res);
}
/** Returns true iff a file with the given name exists. */
@@ -176,7 +176,7 @@ class CompoundFileReader extends Directory {
public long fileLength(String name)
throws IOException
{
FileEntry e = (FileEntry) entries.get(name);
FileEntry e = entries.get(name);
if (e == null)
throw new IOException("File " + name + " does not exist");
return e.length;

View File

@@ -88,7 +88,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
final int numThreads = mergeThreadCount();
for(int i=0;i<numThreads;i++) {
MergeThread merge = (MergeThread) mergeThreads.get(i);
MergeThread merge = mergeThreads.get(i);
merge.setThreadPriority(pri);
}
}
@@ -123,7 +123,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
final int count = mergeThreads.size();
if (verbose()) {
for(int i=0;i<count;i++)
message(" " + i + ": " + ((MergeThread) mergeThreads.get(i)));
message(" " + i + ": " + mergeThreads.get(i));
}
try {
@@ -141,7 +141,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
int count = 0;
final int numThreads = mergeThreads.size();
for(int i=0;i<numThreads;i++)
if (((MergeThread) mergeThreads.get(i)).isAlive())
if (mergeThreads.get(i).isAlive())
count++;
return count;
}

View File

@@ -207,7 +207,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
for (int i = infos.size() - 1; i>=0; i--) {
// find SegmentReader for this segment
Integer oldReaderIndex = (Integer) segmentReaders.get(infos.info(i).name);
Integer oldReaderIndex = segmentReaders.get(infos.info(i).name);
if (oldReaderIndex == null) {
// this is a new segment, no old SegmentReader can be reused
newReaders[i] = null;
@@ -268,17 +268,17 @@ class DirectoryReader extends IndexReader implements Cloneable {
// try to copy unchanged norms from the old normsCache to the new one
if (oldNormsCache != null) {
for (Map.Entry<String,byte[]> entry: oldNormsCache.entrySet()) {
String field = (String) entry.getKey();
String field = entry.getKey();
if (!hasNorms(field)) {
continue;
}
byte[] oldBytes = (byte[]) entry.getValue();
byte[] oldBytes = entry.getValue();
byte[] bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++) {
Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
Integer oldReaderIndex = segmentReaders.get(subReaders[i].getSegmentName());
// this SegmentReader was not re-opened, we can copy all of its norms
if (oldReaderIndex != null &&
@@ -394,14 +394,14 @@ class DirectoryReader extends IndexReader implements Cloneable {
assert isCurrent();
if (openReadOnly) {
return (IndexReader) clone(openReadOnly);
return clone(openReadOnly);
} else {
return this;
}
} else if (isCurrent()) {
if (openReadOnly != readOnly) {
// Just fallback to clone
return (IndexReader) clone(openReadOnly);
return clone(openReadOnly);
} else {
return this;
}
@@ -412,7 +412,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
if (segmentInfos != null && commit.getSegmentsFileName().equals(segmentInfos.getCurrentSegmentFileName())) {
if (readOnly != openReadOnly) {
// Just fallback to clone
return (IndexReader) clone(openReadOnly);
return clone(openReadOnly);
} else {
return this;
}
@@ -563,7 +563,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
byte[] bytes = normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
@@ -579,7 +579,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
byte[] bytes = normsCache.get(field);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
} else if (bytes != null) { // cache hit
@@ -977,7 +977,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
int numMatchingSegments = 0;
matchingSegments[0] = null;
SegmentMergeInfo top = (SegmentMergeInfo)queue.top();
SegmentMergeInfo top = queue.top();
if (top == null) {
term = null;
@@ -991,7 +991,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
matchingSegments[numMatchingSegments++] = top;
queue.pop();
docFreq += top.termEnum.docFreq(); // increment freq
top = (SegmentMergeInfo)queue.top();
top = queue.top();
}
matchingSegments[numMatchingSegments] = null;
@@ -1168,7 +1168,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
}
protected TermDocs termDocs(IndexReader reader) throws IOException {
return (TermDocs)reader.termPositions();
return reader.termPositions();
}
public int nextPosition() throws IOException {

View File

@@ -128,7 +128,7 @@ final class DocFieldProcessorPerThread extends DocConsumerPerThread {
}
private void rehash() {
final int newHashSize = (int) (fieldHash.length*2);
final int newHashSize = (fieldHash.length*2);
assert newHashSize > fieldHash.length;
final DocFieldProcessorPerField newHashArray[] = new DocFieldProcessorPerField[newHashSize];
@@ -172,7 +172,7 @@ final class DocFieldProcessorPerThread extends DocConsumerPerThread {
// vectors, etc.):
for(int i=0;i<numDocFields;i++) {
Fieldable field = (Fieldable) docFields.get(i);
Fieldable field = docFields.get(i);
final String fieldName = field.name();
// Make sure we have a PerField allocated

View File

@@ -666,7 +666,7 @@ final class DocumentsWriter {
// First, find a thread state. If this thread already
// has affinity to a specific ThreadState, use that one
// again.
DocumentsWriterThreadState state = (DocumentsWriterThreadState) threadBindings.get(Thread.currentThread());
DocumentsWriterThreadState state = threadBindings.get(Thread.currentThread());
if (state == null) {
// First time this thread has called us since last

View File

@@ -102,7 +102,7 @@ final class FieldInfos {
FieldInfos fis = new FieldInfos();
final int numField = byNumber.size();
for(int i=0;i<numField;i++) {
FieldInfo fi = (FieldInfo) ((FieldInfo) byNumber.get(i)).clone();
FieldInfo fi = (FieldInfo) ( byNumber.get(i)).clone();
fis.byNumber.add(fi);
fis.byName.put(fi.name, fi);
}
@@ -259,7 +259,7 @@ final class FieldInfos {
}
public FieldInfo fieldInfo(String fieldName) {
return (FieldInfo) byName.get(fieldName);
return byName.get(fieldName);
}
/**

View File

@@ -19,7 +19,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import org.apache.lucene.store.IndexInput;
abstract class FormatPostingsPositionsConsumer {

View File

@@ -18,7 +18,7 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
import java.io.IOException;

View File

@@ -254,7 +254,7 @@ final class IndexFileDeleter {
// First decref all files that had been referred to by
// the now-deleted commits:
for(int i=0;i<size;i++) {
CommitPoint commit = (CommitPoint) commitsToDelete.get(i);
CommitPoint commit = commitsToDelete.get(i);
if (infoStream != null) {
message("deleteCommits: now decRef commit \"" + commit.getSegmentsFileName() + "\"");
}
@@ -269,7 +269,7 @@ final class IndexFileDeleter {
int readFrom = 0;
int writeTo = 0;
while(readFrom < size) {
CommitPoint commit = (CommitPoint) commits.get(readFrom);
CommitPoint commit = commits.get(readFrom);
if (!commit.deleted) {
if (writeTo != readFrom) {
commits.set(writeTo, commits.get(readFrom));

View File

@@ -590,7 +590,7 @@ public class IndexWriter implements Closeable {
readBufferSize = BufferedIndexInput.BUFFER_SIZE;
}
SegmentReader sr = (SegmentReader) readerMap.get(info);
SegmentReader sr = readerMap.get(info);
if (sr == null) {
// TODO: we may want to avoid doing this while
// synchronized
@@ -619,7 +619,7 @@ public class IndexWriter implements Closeable {
// Returns a ref
public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
SegmentReader sr = (SegmentReader) readerMap.get(info);
SegmentReader sr = readerMap.get(info);
if (sr != null) {
sr.incRef();
}
@@ -2364,7 +2364,7 @@ public class IndexWriter implements Closeable {
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++)
registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
registerMerge(spec.merges.get(i));
}
}
@@ -2385,7 +2385,7 @@ public class IndexWriter implements Closeable {
// if any of them have hit an exception.
running = false;
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
final MergePolicy.OneMerge merge = spec.merges.get(i);
if (pendingMerges.contains(merge) || runningMerges.contains(merge))
running = true;
Throwable t = merge.getException();
@@ -2480,7 +2480,7 @@ public class IndexWriter implements Closeable {
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = ((MergePolicy.OneMerge) spec.merges.get(i));
final MergePolicy.OneMerge merge = ( spec.merges.get(i));
merge.optimize = true;
merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
}
@@ -2492,7 +2492,7 @@ public class IndexWriter implements Closeable {
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++)
registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
registerMerge(spec.merges.get(i));
}
}
@@ -2504,7 +2504,7 @@ public class IndexWriter implements Closeable {
return null;
else {
// Advance the merge from pending to running
MergePolicy.OneMerge merge = (MergePolicy.OneMerge) pendingMerges.removeFirst();
MergePolicy.OneMerge merge = pendingMerges.removeFirst();
runningMerges.add(merge);
return merge;
}
@@ -2518,7 +2518,7 @@ public class IndexWriter implements Closeable {
else {
Iterator<MergePolicy.OneMerge> it = pendingMerges.iterator();
while(it.hasNext()) {
MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
MergePolicy.OneMerge merge = it.next();
if (merge.isExternal) {
// Advance the merge from pending to running
it.remove();

View File

@@ -163,7 +163,7 @@ public abstract class MergePolicy implements java.io.Closeable {
b.append("MergeSpec:\n");
final int count = merges.size();
for(int i=0;i<count;i++)
b.append(" ").append(1 + i).append(": ").append(((OneMerge) merges.get(i)).segString(dir));
b.append(" ").append(1 + i).append(": ").append(merges.get(i).segString(dir));
return b.toString();
}
}

View File

@@ -67,7 +67,7 @@ public class MultiReader extends IndexReader implements Cloneable {
}
private void initialize(IndexReader[] subReaders, boolean closeSubReaders) {
this.subReaders = (IndexReader[]) subReaders.clone();
this.subReaders = subReaders.clone();
starts = new int[subReaders.length + 1]; // build starts array
decrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {

View File

@@ -143,7 +143,7 @@ public class MultipleTermPositions implements TermPositions {
public final boolean skipTo(int target) throws IOException {
while (_termPositionsQueue.peek() != null && target > _termPositionsQueue.peek().doc()) {
TermPositions tp = (TermPositions) _termPositionsQueue.pop();
TermPositions tp = _termPositionsQueue.pop();
if (tp.skipTo(target))
_termPositionsQueue.add(tp);
else
@@ -162,7 +162,7 @@ public class MultipleTermPositions implements TermPositions {
public final void close() throws IOException {
while (_termPositionsQueue.size() > 0)
((TermPositions) _termPositionsQueue.pop()).close();
_termPositionsQueue.pop().close();
}
/**

View File

@@ -195,8 +195,8 @@ public class ParallelReader extends IndexReader {
List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
ParallelReader pr = new ParallelReader();
for (int i = 0; i < readers.size(); i++) {
IndexReader oldReader = (IndexReader) readers.get(i);
IndexReader newReader = (IndexReader) newReaders.get(i);
IndexReader oldReader = readers.get(i);
IndexReader newReader = newReaders.get(i);
if (newReader == oldReader) {
newDecrefOnClose.add(Boolean.TRUE);
newReader.incRef();
@@ -293,8 +293,7 @@ public class ParallelReader extends IndexReader {
if (vector != null)
results.add(vector);
}
return (TermFreqVector[])
results.toArray(new TermFreqVector[results.size()]);
return results.toArray(new TermFreqVector[results.size()]);
}
public TermFreqVector getTermFreqVector(int n, String field)

View File

@@ -70,7 +70,7 @@ public class PositionBasedTermVectorMapper extends TermVectorMapper{
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
for (int i = 0; i < positions.length; i++) {
Integer posVal = Integer.valueOf(positions[i]);
TVPositionInfo pos = (TVPositionInfo) currentPositions.get(posVal);
TVPositionInfo pos = currentPositions.get(posVal);
if (pos == null) {
pos = new TVPositionInfo(positions[i], storeOffsets);
currentPositions.put(posVal, pos);

View File

@@ -264,7 +264,7 @@ public final class SegmentInfo {
final int size = files.size();
sizeInBytes = 0;
for(int i=0;i<size;i++) {
final String fileName = (String) files.get(i);
final String fileName = files.get(i);
// We don't count bytes used by a shared doc store
// against this segment:
if (docStoreOffset == -1 || !IndexFileNames.isDocStoreFile(fileName))

View File

@@ -35,7 +35,7 @@ final class SegmentMergeQueue extends PriorityQueue<SegmentMergeInfo> {
final void close() throws IOException {
while (top() != null)
((SegmentMergeInfo)pop()).close();
pop().close();
}
}
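
Once the queue class extends PriorityQueue<SegmentMergeInfo> (the internal queue is generic as of this patch series), pop() and top() are typed and the casts above go away. A hedged sketch of the same effect using java.util.PriorityQueue, which stands in for Lucene's internal queue (so poll()/peek() replace pop()/top()):

import java.util.PriorityQueue;

public class TypedQueueSketch {
  public static void main(String[] args) {
    PriorityQueue<String> queue = new PriorityQueue<String>();
    queue.add("b-segment");
    queue.add("a-segment");

    // Raw queue:   String top = (String) queue.poll();
    // Typed queue: the element type comes from the declaration.
    while (queue.peek() != null) {
      String top = queue.poll();
      System.out.println(top); // a-segment, then b-segment
    }
  }
}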

View File

@@ -115,7 +115,7 @@ final class SegmentMerger {
* @return The ith reader to be merged
*/
final IndexReader segmentReader(int i) {
return (IndexReader) readers.get(i);
return readers.get(i);
}
/**
@@ -244,7 +244,7 @@ final class SegmentMerger {
// FieldInfos, then we can do a bulk copy of the
// stored fields:
for (int i = 0; i < numReaders; i++) {
IndexReader reader = (IndexReader) readers.get(i);
IndexReader reader = readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true;
@@ -576,7 +576,7 @@ final class SegmentMerger {
int base = 0;
final int readerCount = readers.size();
for (int i = 0; i < readerCount; i++) {
IndexReader reader = (IndexReader) readers.get(i);
IndexReader reader = readers.get(i);
TermEnum termEnum = reader.terms();
SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader);
int[] docMap = smi.getDocMap();
@@ -606,13 +606,13 @@ final class SegmentMerger {
while (queue.size() > 0) {
int matchSize = 0; // pop matching terms
match[matchSize++] = (SegmentMergeInfo) queue.pop();
match[matchSize++] = queue.pop();
Term term = match[0].term;
SegmentMergeInfo top = (SegmentMergeInfo) queue.top();
SegmentMergeInfo top = queue.top();
while (top != null && term.compareTo(top.term) == 0) {
match[matchSize++] = (SegmentMergeInfo) queue.pop();
top = (SegmentMergeInfo) queue.top();
match[matchSize++] = queue.pop();
top = queue.top();
}
if (currentField != term.field) {

View File

@@ -61,7 +61,7 @@ public class SortedTermVectorMapper extends TermVectorMapper{
*/
//We need to combine any previous mentions of the term
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
TermVectorEntry entry = (TermVectorEntry) termToTVE.get(term);
TermVectorEntry entry = termToTVE.get(term);
if (entry == null) {
entry = new TermVectorEntry(ALL, term, frequency,
storeOffsets == true ? offsets : null,