YARN-5368. Memory leak in timeline server (Jonathan Eagles via Varun Saxena)

Varun Saxena committed on 2017-03-29 01:53:20 +05:30
parent 6b09336438
commit 01aca54a22

1 changed file with 117 additions and 146 deletions
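In the old code each method created its LevelDB handles (DBIterator, WriteBatch, local FileSystem) up front and released them with IOUtils.cleanup(LOG, ...) in a single finally block. Where the iterator is re-created inside a loop (the hunks around old lines 582 and 716), only the last instance was closed, so earlier iterators leaked native memory. The patch moves each handle into a Java 7 try-with-resources statement, which closes it at the end of its own scope on every exit path. A minimal sketch of the before/after pattern; the NativeHandle class and method names below are hypothetical stand-ins, not code from the patch:

// Illustrative sketch only: NativeHandle is a hypothetical stand-in for a
// native-backed handle such as a LevelDB DBIterator or WriteBatch.
import java.io.Closeable;
import java.io.IOException;

public class LeakFixSketch {

  /** Pretend handle that owns off-heap memory until close() is called. */
  static class NativeHandle implements Closeable {
    @Override
    public void close() {
      // release the native allocation here
    }
  }

  // Before the patch: release depends on the finally block reaching the
  // handle (the real code used IOUtils.cleanup(LOG, handle)); a handle that
  // is reassigned or never reaches the finally block is leaked.
  static void manualCleanup() throws IOException {
    NativeHandle handle = null;
    try {
      handle = new NativeHandle();
      // ... use the handle ...
    } finally {
      if (handle != null) {
        handle.close();
      }
    }
  }

  // After the patch: try-with-resources closes the handle automatically on
  // normal completion, early return, and exception.
  static void tryWithResources() throws IOException {
    try (NativeHandle handle = new NativeHandle()) {
      // ... use the handle ...
    }
  }

  public static void main(String[] args) throws IOException {
    manualCleanup();
    tryWithResources();
  }
}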

RollingLevelDBTimelineStore.java

@@ -275,9 +275,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
     Path domainDBPath = new Path(dbPath, DOMAIN);
     Path starttimeDBPath = new Path(dbPath, STARTTIME);
     Path ownerDBPath = new Path(dbPath, OWNER);
-    FileSystem localFS = null;
-    try {
-      localFS = FileSystem.getLocal(conf);
+    try (FileSystem localFS = FileSystem.getLocal(conf)) {
       if (!localFS.exists(dbPath)) {
         if (!localFS.mkdirs(dbPath)) {
           throw new IOException("Couldn't create directory for leveldb "
@@ -306,8 +304,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
         localFS.setPermission(ownerDBPath, LEVELDB_DIR_UMASK);
       }
-    } finally {
-      IOUtils.cleanup(LOG, localFS);
     }
     options.maxOpenFiles(conf.getInt(
         TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
@@ -408,19 +404,15 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         .add(writeReverseOrderedLong(revStartTime)).add(entityId)
         .getBytesForLookup();
-    DBIterator iterator = null;
-    try {
     DB db = entitydb.getDBForStartTime(revStartTime);
     if (db == null) {
       return null;
     }
-      iterator = db.iterator();
+    try (DBIterator iterator = db.iterator()) {
       iterator.seek(prefix);
       return getEntity(entityId, entityType, revStartTime, fields, iterator,
           prefix, prefix.length);
-    } finally {
-      IOUtils.cleanup(LOG, iterator);
     }
   }
@@ -533,8 +525,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
                 o2.length);
           }
         });
-    DBIterator iterator = null;
-    try {
     // look up start times for the specified entities
     // skip entities with no start time
     for (String entityId : entityIds) {
@@ -582,7 +573,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       if (db == null) {
         continue;
       }
-      iterator = db.iterator();
+      try (DBIterator iterator = db.iterator()) {
         for (iterator.seek(first); entity.getEvents().size() < limit
             && iterator.hasNext(); iterator.next()) {
           byte[] key = iterator.peekNext().getKey();
@@ -599,8 +590,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
           }
         }
       }
-    } finally {
-      IOUtils.cleanup(LOG, iterator);
     }
     return events;
   }
@@ -657,8 +646,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       Long limit, Long starttime, Long endtime, String fromId, Long fromTs,
       Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields,
       CheckAcl checkAcl, boolean usingPrimaryFilter) throws IOException {
-    DBIterator iterator = null;
-    try {
     KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
     // only db keys matching the prefix (base + entity type) will be parsed
     byte[] prefix = kb.getBytesForLookup();
@@ -716,7 +703,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
     DB db = rollingdb.getDBForStartTime(firstStartTime);
     while (entities.getEntities().size() < limit && db != null) {
-      iterator = db.iterator();
+      try (DBIterator iterator = db.iterator()) {
         iterator.seek(first);
         // iterate until one of the following conditions is met: limit is
@@ -814,10 +801,8 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
         db = rollingdb.getPreviousDB(db);
       }
-      return entities;
-    } finally {
-      IOUtils.cleanup(LOG, iterator);
     }
+    return entities;
   }
 
   /**
@@ -1459,15 +1444,14 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
     long startTimesCount = 0;
     WriteBatch writeBatch = null;
-    DBIterator iterator = null;
-    try {
-      writeBatch = starttimedb.createWriteBatch();
     ReadOptions readOptions = new ReadOptions();
     readOptions.fillCache(false);
-      iterator = starttimedb.iterator(readOptions);
+    try (DBIterator iterator = starttimedb.iterator(readOptions)) {
       // seek to the first start time entry
       iterator.seekToFirst();
+      writeBatch = starttimedb.createWriteBatch();
       // evaluate each start time entry to see if it needs to be evicted or not
       while (iterator.hasNext()) {
@@ -1513,7 +1497,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
           + " start time entities earlier than " + minStartTime);
     } finally {
       IOUtils.cleanup(LOG, writeBatch);
-      IOUtils.cleanup(LOG, iterator);
     }
     return startTimesCount;
   }
@@ -1598,11 +1581,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   // TODO: make data retention work with the domain data as well
   @Override
   public void put(TimelineDomain domain) throws IOException {
-    WriteBatch domainWriteBatch = null;
-    WriteBatch ownerWriteBatch = null;
-    try {
-      domainWriteBatch = domaindb.createWriteBatch();
-      ownerWriteBatch = ownerdb.createWriteBatch();
+    try (WriteBatch domainWriteBatch = domaindb.createWriteBatch();
+        WriteBatch ownerWriteBatch = ownerdb.createWriteBatch();) {
       if (domain.getId() == null || domain.getId().length() == 0) {
         throw new IllegalArgumentException("Domain doesn't have an ID");
       }
@@ -1682,9 +1663,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       ownerWriteBatch.put(ownerLookupEntryKey, timestamps);
       domaindb.write(domainWriteBatch);
       ownerdb.write(ownerWriteBatch);
-    } finally {
-      IOUtils.cleanup(LOG, domainWriteBatch);
-      IOUtils.cleanup(LOG, ownerWriteBatch);
     }
   }
@@ -1709,26 +1687,21 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   @Override
   public TimelineDomain getDomain(String domainId) throws IOException {
-    DBIterator iterator = null;
-    try {
+    try (DBIterator iterator = domaindb.iterator()) {
       byte[] prefix = KeyBuilder.newInstance().add(domainId)
           .getBytesForLookup();
-      iterator = domaindb.iterator();
       iterator.seek(prefix);
       return getTimelineDomain(iterator, domainId, prefix);
-    } finally {
-      IOUtils.cleanup(LOG, iterator);
     }
   }
 
   @Override
   public TimelineDomains getDomains(String owner) throws IOException {
-    DBIterator iterator = null;
-    try {
+    try (DBIterator iterator = ownerdb.iterator()) {
       byte[] prefix = KeyBuilder.newInstance().add(owner).getBytesForLookup();
+      iterator.seek(prefix);
       List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
-      for (iterator = ownerdb.iterator(), iterator.seek(prefix); iterator
-          .hasNext();) {
+      while (iterator.hasNext()) {
         byte[] key = iterator.peekNext().getKey();
         if (!prefixMatches(prefix, prefix.length, key)) {
           break;
@@ -1761,8 +1734,6 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       TimelineDomains domainsToReturn = new TimelineDomains();
       domainsToReturn.addDomains(domains);
       return domainsToReturn;
-    } finally {
-      IOUtils.cleanup(LOG, iterator);
     }
   }
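Two smaller points from the same conversion: put(TimelineDomain) declares both write batches in a single try-with-resources, and getDomains drops the unusual for-loop header (iterator creation and seek crammed into the for-init) in favour of a plain seek followed by a while loop. When several resources share one try, all of them are closed when the block exits, in reverse declaration order, even if the body throws. A rough illustration; the Batch class below is a hypothetical stand-in for a LevelDB WriteBatch, not code from the patch:

// Illustrative sketch only: Batch is a hypothetical stand-in for a LevelDB
// WriteBatch; it is not code from the patch.
import java.io.Closeable;

public class MultiResourceSketch {

  /** Pretend write batch that buffers edits and holds memory until closed. */
  static class Batch implements Closeable {
    private final String name;

    Batch(String name) {
      this.name = name;
    }

    void put(String key, String value) {
      // buffer the write; nothing is freed until close()
    }

    @Override
    public void close() {
      System.out.println("closed " + name);
    }
  }

  public static void main(String[] args) {
    // Both batches are closed when the block exits, in reverse declaration
    // order (owner first, then domain), even if one of the puts throws.
    try (Batch domainBatch = new Batch("domain");
        Batch ownerBatch = new Batch("owner")) {
      domainBatch.put("id", "some-domain");
      ownerBatch.put("owner", "some-user");
    }
  }
}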