SOLR-11873: Use time based expiration cache in all necessary places in HdfsDirectoryFactory.

markrmiller 2018-01-29 09:49:44 -06:00
parent b4baf080e9
commit 13773755b8
2 changed files with 18 additions and 30 deletions

solr/CHANGES.txt

@@ -178,6 +178,8 @@ Bug Fixes
 * SOLR-10525: Stacked recovery requests do no cancel an in progress recovery first. (Mike Drob via Cao Manh Dat)
 
+* SOLR-11873: Use time based expiration cache in all necessary places in HdfsDirectoryFactory. (Mihaly Toth via Mark Miller)
+
 Optimizations
 ----------------------

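The tmpFsCache field that the hunks below read from is not part of this diff. As a rough sketch of the kind of time-based expiration cache the commit title refers to, a Guava cache along the following lines hands out one FileSystem per path and closes it when the entry expires; the class name, size bound, and expiry interval here are illustrative assumptions, not values taken from HdfsDirectoryFactory.

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import org.apache.hadoop.fs.FileSystem;
import org.apache.solr.common.util.IOUtils;

class FileSystemCacheSketch {
  // Entries expire a fixed time after last access, and the removal listener
  // closes the evicted FileSystem, which is why callers of the cache never
  // close the handle themselves ("no need to close the fs, the cache will do it").
  private final Cache<String,FileSystem> tmpFsCache = CacheBuilder.newBuilder()
      .maximumSize(1000)                         // assumed bound
      .expireAfterAccess(5, TimeUnit.MINUTES)    // assumed expiry interval
      .removalListener((RemovalListener<String,FileSystem>) notification ->
          IOUtils.closeQuietly(notification.getValue()))
      .build();
}

A call site then only needs tmpFsCache.get(path, loader), which is exactly what the getCachedFileSystem() helper introduced in this commit wraps.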
solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java

@@ -322,14 +322,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
   @Override
   public boolean exists(String path) {
     final Path hdfsDirPath = new Path(path);
-    final Configuration conf = getConf();
-    FileSystem fileSystem = null;
-    try {
-      // no need to close the fs, the cache will do it
-      fileSystem = tmpFsCache.get(path, () -> FileSystem.get(hdfsDirPath.toUri(), conf));
-    } catch (ExecutionException e) {
-      throw new RuntimeException(e);
-    }
+    FileSystem fileSystem = getCachedFileSystem(path);
     try {
       return fileSystem.exists(hdfsDirPath);
@@ -349,16 +342,8 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
   protected synchronized void removeDirectory(final CacheValue cacheValue)
       throws IOException {
-    final Configuration conf = getConf();
-    FileSystem fileSystem = null;
-    try {
-      // no need to close the fs, the cache will do it
-      fileSystem = tmpFsCache.get(cacheValue.path, () -> FileSystem.get(new Path(cacheValue.path).toUri(), conf));
-    } catch (ExecutionException e) {
-      throw new RuntimeException(e);
-    }
+    FileSystem fileSystem = getCachedFileSystem(cacheValue.path);
     try {
       boolean success = fileSystem.delete(new Path(cacheValue.path), true);
       if (!success) {
@@ -438,11 +423,9 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
   @Override
   public long size(String path) throws IOException {
     Path hdfsDirPath = new Path(path);
-    FileSystem fileSystem = null;
+    FileSystem fileSystem = getCachedFileSystem(path);
     try {
-      fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), getConf());
-      long size = fileSystem.getContentSummary(hdfsDirPath).getLength();
-      return size;
+      return fileSystem.getContentSummary(hdfsDirPath).getLength();
     } catch (IOException e) {
       LOG.error("Error checking if hdfs path exists", e);
       throw new SolrException(ErrorCode.SERVER_ERROR, "Error checking if hdfs path exists", e);
@@ -450,7 +433,16 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
       IOUtils.closeQuietly(fileSystem);
     }
   }
 
+  private FileSystem getCachedFileSystem(String path) {
+    try {
+      // no need to close the fs, the cache will do it
+      return tmpFsCache.get(path, () -> FileSystem.get(new Path(path).toUri(), getConf()));
+    } catch (ExecutionException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   public String getConfDir() {
     return confDir;
   }
@@ -515,13 +507,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
     // Get the FileSystem object
     final Path dataDirPath = new Path(dataDir);
-    final Configuration conf = getConf();
-    FileSystem fileSystem = null;
-    try {
-      fileSystem = tmpFsCache.get(dataDir, () -> FileSystem.get(dataDirPath.toUri(), conf));
-    } catch (ExecutionException e) {
-      throw new RuntimeException(e);
-    }
+    FileSystem fileSystem = getCachedFileSystem(dataDir);
     boolean pathExists = false;
     try {