SOLR-6425: If you are using the new global hdfs block cache option, you can end up reading corrupt files on file name reuse.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1620236 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Mark Robert Miller 2014-08-24 23:26:33 +00:00
parent c4ef611991
commit 2bb99e83fa
4 changed files with 11 additions and 14 deletions

View File

@ -129,6 +129,9 @@ Bug Fixes
* SOLR-6424: The hdfs block cache BLOCKCACHE_WRITE_ENABLED is not defaulting to false like it
should. (Mark Miller)
* SOLR-6425: If you are using the new global hdfs block cache option, you can end up reading corrupt
files on file name reuse. (Mark Miller, Gregory Chanan)
Other Changes
---------------------

View File

@ -81,7 +81,7 @@ public class BlockCache {
}
// Releases the cached block(s) associated with the given key.
// NOTE(review): the two lines below appear to be an old/new diff pair from
// this rendered commit view, not sequential statements. The remove() form is
// the SOLR-6425 fix — evicting the mapping (rather than only looking it up
// with get()) prevents a later file with a reused name from reading stale
// cached blocks. Confirm against the actual repository source.
public void release(BlockCacheKey key) {
releaseLocation(cache.get(key));
releaseLocation(cache.remove(key));
}
private void releaseLocation(BlockCacheLocation location) {

View File

@ -96,11 +96,6 @@ public class BlockDirectory extends Directory {
/**
 * Convenience constructor that delegates to the full constructor with
 * {@code releaseBlocksOnClose} set to {@code false} (i.e. cached blocks are
 * not released when this directory is closed).
 *
 * @param dirName logical name of this directory (used as the cache path prefix)
 * @param directory the underlying Directory being wrapped
 * @param cache block cache implementation backing reads/writes
 * @param blockCacheFileTypes file extensions eligible for block caching;
 *        NOTE(review): semantics of null/empty not visible here — confirm
 * @param blockCacheReadEnabled whether reads go through the block cache
 * @param blockCacheWriteEnabled whether writes populate the block cache
 * @throws IOException if the delegated constructor fails
 */
public BlockDirectory(String dirName, Directory directory, Cache cache,
Set<String> blockCacheFileTypes, boolean blockCacheReadEnabled,
boolean blockCacheWriteEnabled) throws IOException {
this(dirName, directory, cache, blockCacheFileTypes, blockCacheReadEnabled, blockCacheWriteEnabled, false);
}
public BlockDirectory(String dirName, Directory directory, Cache cache,
Set<String> blockCacheFileTypes, boolean blockCacheReadEnabled,
boolean blockCacheWriteEnabled, boolean releaseBlocksOnClose) throws IOException {
this.dirName = dirName;
this.directory = directory;
blockSize = BLOCK_SIZE;
@ -244,11 +239,11 @@ public class BlockDirectory extends Directory {
// segments.gen won't be removed above
cache.delete(dirName + "/" + "segments.gen");
cache.releaseResources();
} catch (FileNotFoundException e) {
// the local file system folder may be gone
} finally {
directory.close();
cache.releaseResources();
}
}

View File

@ -31,7 +31,7 @@ public class BlockDirectoryCache implements Cache {
private final BlockCache blockCache;
private final AtomicInteger counter = new AtomicInteger();
private final Map<String,Integer> names = new ConcurrentHashMap<>();
private Set<BlockCacheKey> keys;
private Set<BlockCacheKey> keysToRelease;
private final String path;
private final Metrics metrics;
@ -44,7 +44,7 @@ public class BlockDirectoryCache implements Cache {
this.path = path;
this.metrics = metrics;
if (releaseBlocks) {
keys = Collections.synchronizedSet(new HashSet<BlockCacheKey>());
keysToRelease = Collections.synchronizedSet(new HashSet<BlockCacheKey>());
}
}
@ -74,9 +74,8 @@ public class BlockDirectoryCache implements Cache {
blockCacheKey.setPath(path);
blockCacheKey.setBlock(blockId);
blockCacheKey.setFile(file);
blockCache.store(blockCacheKey, blockOffset, buffer, offset, length);
if (keys != null) {
keys.add(blockCacheKey);
if (blockCache.store(blockCacheKey, blockOffset, buffer, offset, length) && keysToRelease != null) {
keysToRelease.add(blockCacheKey);
}
}
@ -117,8 +116,8 @@ public class BlockDirectoryCache implements Cache {
@Override
public void releaseResources() {
if (keys != null) {
for (BlockCacheKey key : keys) {
if (keysToRelease != null) {
for (BlockCacheKey key : keysToRelease) {
blockCache.release(key);
}
}