LUCENE-6662: Fixed potential resource leaks.

Adrien Grand 2016-06-09 15:48:10 +02:00
parent 98f7ab5137
commit b6c6d5e9ff
7 changed files with 106 additions and 103 deletions

lucene/CHANGES.txt

@@ -7,7 +7,10 @@ http://s.apache.org/luceneversions
 (No Changes)
 
 ======================= Lucene 6.2.0 =======================
 
 (No Changes)
 
+Bug Fixes
+
+* LUCENE-6662: Fixed potential resource leaks. (Rishabh Patel via Adrien Grand)
 ======================= Lucene 6.1.0 =======================

WordDictionary.java

@@ -148,19 +148,17 @@ class WordDictionary extends AbstractDictionary {
   private void loadFromObjectInputStream(InputStream serialObjectInputStream)
       throws IOException, ClassNotFoundException {
-    ObjectInputStream input = new ObjectInputStream(serialObjectInputStream);
-    wordIndexTable = (short[]) input.readObject();
-    charIndexTable = (char[]) input.readObject();
-    wordItem_charArrayTable = (char[][][]) input.readObject();
-    wordItem_frequencyTable = (int[][]) input.readObject();
-    // log.info("load core dict from serialization.");
-    input.close();
+    try (ObjectInputStream input = new ObjectInputStream(serialObjectInputStream)) {
+      wordIndexTable = (short[]) input.readObject();
+      charIndexTable = (char[]) input.readObject();
+      wordItem_charArrayTable = (char[][][]) input.readObject();
+      wordItem_frequencyTable = (int[][]) input.readObject();
+      // log.info("load core dict from serialization.");
+    }
   }
 
   private void saveToObj(Path serialObj) {
     try {
-      ObjectOutputStream output = new ObjectOutputStream(Files.newOutputStream(
-          serialObj));
+    try (ObjectOutputStream output = new ObjectOutputStream(Files.newOutputStream(serialObj))) {
       output.writeObject(wordIndexTable);
       output.writeObject(charIndexTable);
       output.writeObject(wordItem_charArrayTable);

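Both methods now rely on try-with-resources, which closes the stream even when a readObject or writeObject call throws; the old code skipped input.close() on that path. A minimal, self-contained sketch of the guarantee (hypothetical Tracked class, not part of this commit):

import java.io.Closeable;
import java.io.IOException;

public class TryWithResourcesDemo {
  // Hypothetical resource that records whether close() ran.
  static final class Tracked implements Closeable {
    boolean closed = false;
    @Override public void close() { closed = true; }
  }

  public static void main(String[] args) {
    Tracked t = new Tracked();
    try (Closeable c = t) {
      throw new IOException("simulated read failure");
    } catch (IOException expected) {
      // close() already ran before the catch block was entered.
      System.out.println("closed = " + t.closed); // prints: closed = true
    }
  }
}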
Compile.java

@@ -117,33 +117,32 @@ public class Compile {
     }
 
     for (int i = 1; i < args.length; i++) {
-      LineNumberReader in;
       // System.out.println("[" + args[i] + "]");
       Diff diff = new Diff();
 
       allocTrie();
 
       System.out.println(args[i]);
-      in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)));
-      for (String line = in.readLine(); line != null; line = in.readLine()) {
-        try {
-          line = line.toLowerCase(Locale.ROOT);
-          StringTokenizer st = new StringTokenizer(line);
-          String stem = st.nextToken();
-          if (storeorig) {
-            trie.add(stem, "-a");
-          }
-          while (st.hasMoreTokens()) {
-            String token = st.nextToken();
-            if (token.equals(stem) == false) {
-              trie.add(token, diff.exec(token, stem));
-            }
-          }
-        } catch (java.util.NoSuchElementException x) {
-          // no base token (stem) on a line
-        }
-      }
-      in.close();
+      try (LineNumberReader in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)))) {
+        for (String line = in.readLine(); line != null; line = in.readLine()) {
+          try {
+            line = line.toLowerCase(Locale.ROOT);
+            StringTokenizer st = new StringTokenizer(line);
+            String stem = st.nextToken();
+            if (storeorig) {
+              trie.add(stem, "-a");
+            }
+            while (st.hasMoreTokens()) {
+              String token = st.nextToken();
+              if (token.equals(stem) == false) {
+                trie.add(token, diff.exec(token, stem));
+              }
+            }
+          } catch (java.util.NoSuchElementException x) {
+            // no base token (stem) on a line
+          }
+        }
+      }
 
       Optimizer o = new Optimizer();
       Optimizer2 o2 = new Optimizer2();
@@ -180,11 +179,11 @@ public class Compile {
         trie.printInfo(System.out, prefix + " ");
       }
 
-      DataOutputStream os = new DataOutputStream(new BufferedOutputStream(
-          Files.newOutputStream(Paths.get(args[i] + ".out"))));
-      os.writeUTF(args[0]);
-      trie.store(os);
-      os.close();
+      try (DataOutputStream os = new DataOutputStream(new BufferedOutputStream(
+          Files.newOutputStream(Paths.get(args[i] + ".out"))))) {
+        os.writeUTF(args[0]);
+        trie.store(os);
+      }
     }
   }

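The DataOutputStream above wraps a BufferedOutputStream, which wraps the file stream; closing the outermost stream flushes and closes the whole chain, so a single resource declaration in the try header is enough. A short sketch of that chaining (hypothetical file name):

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ChainedCloseDemo {
  public static void main(String[] args) throws IOException {
    Path out = Paths.get("trie.out"); // hypothetical output file
    // DataOutputStream.close() flushes and closes the BufferedOutputStream,
    // which in turn flushes and closes the underlying file stream.
    try (DataOutputStream os = new DataOutputStream(new BufferedOutputStream(
        Files.newOutputStream(out)))) {
      os.writeUTF("header");
      os.writeInt(42);
    }
    Files.delete(out);
  }
}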
DiffIt.java

@@ -98,25 +98,25 @@ public class DiffIt {
     int nop = get(3, args[0]);
 
     for (int i = 1; i < args.length; i++) {
-      LineNumberReader in;
       // System.out.println("[" + args[i] + "]");
       Diff diff = new Diff(ins, del, rep, nop);
       String charset = System.getProperty("egothor.stemmer.charset", "UTF-8");
-      in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)));
-      for (String line = in.readLine(); line != null; line = in.readLine()) {
-        try {
-          line = line.toLowerCase(Locale.ROOT);
-          StringTokenizer st = new StringTokenizer(line);
-          String stem = st.nextToken();
-          System.out.println(stem + " -a");
-          while (st.hasMoreTokens()) {
-            String token = st.nextToken();
-            if (token.equals(stem) == false) {
-              System.out.println(stem + " " + diff.exec(token, stem));
-            }
-          }
-        } catch (java.util.NoSuchElementException x) {
-          // no base token (stem) on a line
-        }
-      }
+      try (LineNumberReader in = new LineNumberReader(Files.newBufferedReader(Paths.get(args[i]), Charset.forName(charset)))) {
+        for (String line = in.readLine(); line != null; line = in.readLine()) {
+          try {
+            line = line.toLowerCase(Locale.ROOT);
+            StringTokenizer st = new StringTokenizer(line);
+            String stem = st.nextToken();
+            System.out.println(stem + " -a");
+            while (st.hasMoreTokens()) {
+              String token = st.nextToken();
+              if (token.equals(stem) == false) {
+                System.out.println(stem + " " + diff.exec(token, stem));
+              }
+            }
+          } catch (java.util.NoSuchElementException x) {
+            // no base token (stem) on a line
+          }
+        }
+      }
     }
   }

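One subtlety of the converted loops: if the body throws and the implicit close() then throws as well, try-with-resources propagates the body's exception and attaches close()'s as a suppressed exception, so neither is lost. A sketch (hypothetical Flaky resource) that makes this visible:

import java.io.Closeable;
import java.io.IOException;

public class SuppressedDemo {
  // Hypothetical resource whose close() also fails.
  static final class Flaky implements Closeable {
    @Override public void close() throws IOException {
      throw new IOException("close failed");
    }
  }

  public static void main(String[] args) {
    try (Flaky f = new Flaky()) {
      throw new IOException("body failed");
    } catch (IOException e) {
      System.out.println(e.getMessage()); // body failed
      for (Throwable s : e.getSuppressed()) {
        System.out.println("suppressed: " + s.getMessage()); // close failed
      }
    }
  }
}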
CloseTaxonomyReaderTask.java

@@ -34,12 +34,12 @@ public class CloseTaxonomyReaderTask extends PerfTask {
 
   @Override
   public int doLogic() throws IOException {
-    TaxonomyReader taxoReader = getRunData().getTaxonomyReader();
-    getRunData().setTaxonomyReader(null);
-    if (taxoReader.getRefCount() != 1) {
-      System.out.println("WARNING: CloseTaxonomyReader: reference count is currently " + taxoReader.getRefCount());
-    }
-    taxoReader.close();
+    try (TaxonomyReader taxoReader = getRunData().getTaxonomyReader()) {
+      getRunData().setTaxonomyReader(null);
+      if (taxoReader.getRefCount() != 1) {
+        System.out.println("WARNING: CloseTaxonomyReader: reference count is currently " + taxoReader.getRefCount());
+      }
+    }
     return 1;
   }

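TaxonomyReader is usable as a try-with-resources resource (the try header above would not compile otherwise); close() here is the same call the old code made explicitly, and the getRefCount() check still runs before the reference is released. A generic sketch of the ref-count-then-close idiom (hypothetical RefCounted class, not the Lucene API):

import java.io.Closeable;

public class RefCountDemo {
  // Hypothetical stand-in for a ref-counted reader.
  static final class RefCounted implements Closeable {
    private int refCount = 1;
    int getRefCount() { return refCount; }
    @Override public void close() { refCount--; } // release this reference
  }

  public static void main(String[] args) {
    try (RefCounted reader = new RefCounted()) {
      if (reader.getRefCount() != 1) {
        System.out.println("WARNING: reference count is currently " + reader.getRefCount());
      }
      // ... use reader ...
    } // close() runs here even if the body throws
  }
}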
DirectoryTaxonomyWriter.java

@@ -902,18 +902,18 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
       return map;
     }
     addDone(); // in case this wasn't previously called
-    DataInputStream in = new DataInputStream(new BufferedInputStream(
-        Files.newInputStream(tmpfile)));
-    map = new int[in.readInt()];
-    // NOTE: The current code assumes here that the map is complete,
-    // i.e., every ordinal gets one and exactly one value. Otherwise,
-    // we may run into an EOF here, or vice versa, not read everything.
-    for (int i=0; i<map.length; i++) {
-      int origordinal = in.readInt();
-      int newordinal = in.readInt();
-      map[origordinal] = newordinal;
-    }
-    in.close();
+    try (DataInputStream in = new DataInputStream(new BufferedInputStream(
+        Files.newInputStream(tmpfile)))) {
+      map = new int[in.readInt()];
+      // NOTE: The current code assumes here that the map is complete,
+      // i.e., every ordinal gets one and exactly one value. Otherwise,
+      // we may run into an EOF here, or vice versa, not read everything.
+      for (int i=0; i<map.length; i++) {
+        int origordinal = in.readInt();
+        int newordinal = in.readInt();
+        map[origordinal] = newordinal;
+      }
+    }
 
     // Delete the temporary file, which is no longer needed.
     Files.delete(tmpfile);

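The NOTE kept in the block above still applies: the file must contain exactly one (origordinal, newordinal) pair per ordinal, or readInt() fails with EOFException mid-loop; with try-with-resources the stream is released even on that failure path, and Files.delete(tmpfile) is only reached on success. A self-contained round-trip sketch of this count-prefixed pair format (hypothetical file name):

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class OrdinalMapFileDemo {
  public static void main(String[] args) throws IOException {
    Path tmpfile = Paths.get("ordinal.map"); // hypothetical temporary file
    int[][] pairs = { {0, 2}, {1, 0}, {2, 1} };

    // Write: an int count, then count pairs of (origordinal, newordinal).
    try (DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        Files.newOutputStream(tmpfile)))) {
      out.writeInt(pairs.length);
      for (int[] p : pairs) {
        out.writeInt(p[0]);
        out.writeInt(p[1]);
      }
    }

    // Read it back with the same loop shape as the patched code.
    int[] map;
    try (DataInputStream in = new DataInputStream(new BufferedInputStream(
        Files.newInputStream(tmpfile)))) {
      map = new int[in.readInt()];
      for (int i = 0; i < map.length; i++) {
        map[in.readInt()] = in.readInt(); // index (orig) is read before the value (new)
      }
    }
    Files.delete(tmpfile); // only reached when reading succeeded

    for (int i = 0; i < map.length; i++) {
      System.out.println(i + " -> " + map[i]);
    }
  }
}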
JaspellTernarySearchTrie.java

@@ -231,51 +231,54 @@ public class JaspellTernarySearchTrie implements Accountable {
       in = new BufferedReader(IOUtils.getDecodingReader(new GZIPInputStream(
           Files.newInputStream(file)), StandardCharsets.UTF_8));
     else in = Files.newBufferedReader(file, StandardCharsets.UTF_8);
-    String word;
-    int pos;
-    Float occur, one = new Float(1);
-    while ((word = in.readLine()) != null) {
-      pos = word.indexOf("\t");
-      occur = one;
-      if (pos != -1) {
-        occur = Float.parseFloat(word.substring(pos + 1).trim());
-        word = word.substring(0, pos);
-      }
-      String key = word.toLowerCase(locale);
-      if (rootNode == null) {
-        rootNode = new TSTNode(key.charAt(0), null);
-      }
-      TSTNode node = null;
-      if (key.length() > 0 && rootNode != null) {
-        TSTNode currentNode = rootNode;
-        int charIndex = 0;
-        while (true) {
-          if (currentNode == null) break;
-          int charComp = compareCharsAlphabetically(key.charAt(charIndex),
-              currentNode.splitchar);
-          if (charComp == 0) {
-            charIndex++;
-            if (charIndex == key.length()) {
-              node = currentNode;
-              break;
-            }
-            currentNode = currentNode.relatives[TSTNode.EQKID];
-          } else if (charComp < 0) {
-            currentNode = currentNode.relatives[TSTNode.LOKID];
-          } else {
-            currentNode = currentNode.relatives[TSTNode.HIKID];
-          }
-        }
-        Float occur2 = null;
-        if (node != null) occur2 = ((Float) (node.data));
-        if (occur2 != null) {
-          occur += occur2.floatValue();
-        }
-        currentNode = getOrCreateNode(word.trim().toLowerCase(locale));
-        currentNode.data = occur;
-      }
-    }
-    in.close();
+    try {
+      String word;
+      int pos;
+      Float occur, one = new Float(1);
+      while ((word = in.readLine()) != null) {
+        pos = word.indexOf("\t");
+        occur = one;
+        if (pos != -1) {
+          occur = Float.parseFloat(word.substring(pos + 1).trim());
+          word = word.substring(0, pos);
+        }
+        String key = word.toLowerCase(locale);
+        if (rootNode == null) {
+          rootNode = new TSTNode(key.charAt(0), null);
+        }
+        TSTNode node = null;
+        if (key.length() > 0 && rootNode != null) {
+          TSTNode currentNode = rootNode;
+          int charIndex = 0;
+          while (true) {
+            if (currentNode == null) break;
+            int charComp = compareCharsAlphabetically(key.charAt(charIndex),
+                currentNode.splitchar);
+            if (charComp == 0) {
+              charIndex++;
+              if (charIndex == key.length()) {
+                node = currentNode;
+                break;
+              }
+              currentNode = currentNode.relatives[TSTNode.EQKID];
+            } else if (charComp < 0) {
+              currentNode = currentNode.relatives[TSTNode.LOKID];
+            } else {
+              currentNode = currentNode.relatives[TSTNode.HIKID];
+            }
+          }
+          Float occur2 = null;
+          if (node != null) occur2 = ((Float) (node.data));
+          if (occur2 != null) {
+            occur += occur2.floatValue();
+          }
+          currentNode = getOrCreateNode(word.trim().toLowerCase(locale));
+          currentNode.data = occur;
+        }
+      }
+    } finally {
+      IOUtils.close(in);
+    }
   }
 
   /**
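Unlike the other files in this commit, in is assigned in an earlier if/else rather than in a try header, so the fix uses try/finally with IOUtils.close(in) instead of try-with-resources. An alternative shape (a sketch, not what the commit does) is to push the branching into a helper so the reader can be declared as a resource:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.zip.GZIPInputStream;

public class OpenReaderDemo {
  // Helper that hides the gzip/plain branching behind one call.
  static BufferedReader open(Path file, boolean compressed) throws IOException {
    if (compressed) {
      return new BufferedReader(new InputStreamReader(
          new GZIPInputStream(Files.newInputStream(file)), StandardCharsets.UTF_8));
    }
    return Files.newBufferedReader(file, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws IOException {
    Path file = Paths.get("words.txt"); // hypothetical input file
    try (BufferedReader in = open(file, false)) { // declared in the try header
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}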