mirror of https://github.com/apache/lucene.git
LUCENE-6662: Fixed potential resource leaks.
This commit is contained in:
parent
98f7ab5137
commit
b6c6d5e9ff
|
@ -7,7 +7,10 @@ http://s.apache.org/luceneversions
|
|||
(No Changes)
|
||||
|
||||
======================= Lucene 6.2.0 =======================
|
||||
(No Changes)
|
||||
|
||||
Bug Fixes
|
||||
|
||||
LUCENE-6662: Fixed potential resource leaks. (Rishabh Patel via Adrien Grand)
|
||||
|
||||
======================= Lucene 6.1.0 =======================
|
||||
|
||||
|
|
|
@ -148,19 +148,17 @@ class WordDictionary extends AbstractDictionary {
|
|||
|
||||
private void loadFromObjectInputStream(InputStream serialObjectInputStream)
|
||||
throws IOException, ClassNotFoundException {
|
||||
ObjectInputStream input = new ObjectInputStream(serialObjectInputStream);
|
||||
try (ObjectInputStream input = new ObjectInputStream(serialObjectInputStream)) {
|
||||
wordIndexTable = (short[]) input.readObject();
|
||||
charIndexTable = (char[]) input.readObject();
|
||||
wordItem_charArrayTable = (char[][][]) input.readObject();
|
||||
wordItem_frequencyTable = (int[][]) input.readObject();
|
||||
// log.info("load core dict from serialization.");
|
||||
input.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void saveToObj(Path serialObj) {
|
||||
try {
|
||||
ObjectOutputStream output = new ObjectOutputStream(Files.newOutputStream(
|
||||
serialObj));
|
||||
try (ObjectOutputStream output = new ObjectOutputStream(Files.newOutputStream(serialObj))) {
|
||||
output.writeObject(wordIndexTable);
|
||||
output.writeObject(charIndexTable);
|
||||
output.writeObject(wordItem_charArrayTable);
|
||||
|
|
|
@ -117,14 +117,13 @@ public class Compile {
|
|||
}
|
||||
|
||||
for (int i = 1; i < args.length; i++) {
|
||||
LineNumberReader in;
|
||||
// System.out.println("[" + args[i] + "]");
|
||||
Diff diff = new Diff();
|
||||
|
||||
allocTrie();
|
||||
|
||||
System.out.println(args[i]);
|
||||
in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)));
|
||||
try (LineNumberReader in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)))) {
|
||||
for (String line = in.readLine(); line != null; line = in.readLine()) {
|
||||
try {
|
||||
line = line.toLowerCase(Locale.ROOT);
|
||||
|
@ -143,7 +142,7 @@ public class Compile {
|
|||
// no base token (stem) on a line
|
||||
}
|
||||
}
|
||||
in.close();
|
||||
}
|
||||
|
||||
Optimizer o = new Optimizer();
|
||||
Optimizer2 o2 = new Optimizer2();
|
||||
|
@ -180,11 +179,11 @@ public class Compile {
|
|||
trie.printInfo(System.out, prefix + " ");
|
||||
}
|
||||
|
||||
DataOutputStream os = new DataOutputStream(new BufferedOutputStream(
|
||||
Files.newOutputStream(Paths.get(args[i] + ".out"))));
|
||||
try (DataOutputStream os = new DataOutputStream(new BufferedOutputStream(
|
||||
Files.newOutputStream(Paths.get(args[i] + ".out"))))) {
|
||||
os.writeUTF(args[0]);
|
||||
trie.store(os);
|
||||
os.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -98,11 +98,10 @@ public class DiffIt {
|
|||
int nop = get(3, args[0]);
|
||||
|
||||
for (int i = 1; i < args.length; i++) {
|
||||
LineNumberReader in;
|
||||
// System.out.println("[" + args[i] + "]");
|
||||
Diff diff = new Diff(ins, del, rep, nop);
|
||||
String charset = System.getProperty("egothor.stemmer.charset", "UTF-8");
|
||||
in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)));
|
||||
try (LineNumberReader in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)))) {
|
||||
for (String line = in.readLine(); line != null; line = in.readLine()) {
|
||||
try {
|
||||
line = line.toLowerCase(Locale.ROOT);
|
||||
|
@ -121,4 +120,5 @@ public class DiffIt {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,12 +34,12 @@ public class CloseTaxonomyReaderTask extends PerfTask {
|
|||
|
||||
@Override
|
||||
public int doLogic() throws IOException {
|
||||
TaxonomyReader taxoReader = getRunData().getTaxonomyReader();
|
||||
try (TaxonomyReader taxoReader = getRunData().getTaxonomyReader()) {
|
||||
getRunData().setTaxonomyReader(null);
|
||||
if (taxoReader.getRefCount() != 1) {
|
||||
System.out.println("WARNING: CloseTaxonomyReader: reference count is currently " + taxoReader.getRefCount());
|
||||
}
|
||||
taxoReader.close();
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
|
@ -902,8 +902,8 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
|
|||
return map;
|
||||
}
|
||||
addDone(); // in case this wasn't previously called
|
||||
DataInputStream in = new DataInputStream(new BufferedInputStream(
|
||||
Files.newInputStream(tmpfile)));
|
||||
try (DataInputStream in = new DataInputStream(new BufferedInputStream(
|
||||
Files.newInputStream(tmpfile)))) {
|
||||
map = new int[in.readInt()];
|
||||
// NOTE: The current code assumes here that the map is complete,
|
||||
// i.e., every ordinal gets one and exactly one value. Otherwise,
|
||||
|
@ -913,7 +913,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
|
|||
int newordinal = in.readInt();
|
||||
map[origordinal] = newordinal;
|
||||
}
|
||||
in.close();
|
||||
}
|
||||
|
||||
// Delete the temporary file, which is no longer needed.
|
||||
Files.delete(tmpfile);
|
||||
|
|
|
@ -231,6 +231,7 @@ public class JaspellTernarySearchTrie implements Accountable {
|
|||
in = new BufferedReader(IOUtils.getDecodingReader(new GZIPInputStream(
|
||||
Files.newInputStream(file)), StandardCharsets.UTF_8));
|
||||
else in = Files.newBufferedReader(file, StandardCharsets.UTF_8);
|
||||
try {
|
||||
String word;
|
||||
int pos;
|
||||
Float occur, one = new Float(1);
|
||||
|
@ -275,7 +276,9 @@ public class JaspellTernarySearchTrie implements Accountable {
|
|||
currentNode.data = occur;
|
||||
}
|
||||
}
|
||||
in.close();
|
||||
} finally {
|
||||
IOUtils.close(in);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
Loading…
Reference in New Issue