SOLR-7284: HdfsUpdateLog is using hdfs FileSystem.get without turning off the cache.

SOLR-7286: Using HDFS's FileSystem.newInstance does not guarantee a new instance.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1668411 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2015-03-22 17:09:40 +00:00
parent 8edc462ec1
commit 4e01da3843
8 changed files with 20 additions and 10 deletions
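
For context, the pattern this commit applies across the changed files: disable the Hadoop FileSystem cache for the hdfs scheme, then call FileSystem.get(), which then returns a private instance that the caller owns and must close. Below is a minimal sketch of that pattern, not code from the diff; it assumes a Hadoop client on the classpath, and the namenode URI is hypothetical.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // By default, FileSystem.get() returns a JVM-wide cached instance keyed
    // by (scheme, authority, user); calling close() on it affects every
    // other caller sharing that instance. Per the commit message,
    // FileSystem.newInstance() does not guarantee a new instance either,
    // so the fix disables the cache outright:
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);

    // With the cache disabled for the "hdfs" scheme, get() constructs a
    // fresh FileSystem that this code alone owns and must close.
    URI uri = new URI("hdfs://localhost:8020"); // hypothetical namenode URI
    try (FileSystem fs = FileSystem.get(uri, conf)) {
      System.out.println(fs.exists(new Path("/")));
    }
  }
}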

@@ -276,6 +276,12 @@ Bug Fixes
 
 * SOLR-7141: RecoveryStrategy: Raise time that we wait for any updates from the leader before
   they saw the recovery state to have finished. (Mark Miller)
+
+* SOLR-7284: HdfsUpdateLog is using hdfs FileSystem.get without turning off the cache.
+  (Mark Miller)
+
+* SOLR-7286: Using HDFS's FileSystem.newInstance does not guarantee a new instance.
+  (Mark Miller)
 
 Optimizations
 ----------------------

@@ -261,7 +261,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
     Configuration conf = getConf();
     FileSystem fileSystem = null;
     try {
-      fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), conf);
+      fileSystem = FileSystem.get(hdfsDirPath.toUri(), conf);
       return fileSystem.exists(hdfsDirPath);
     } catch (IOException e) {
       LOG.error("Error checking if hdfs path exists", e);
@@ -275,6 +275,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
     Configuration conf = new Configuration();
     confDir = getConfig(CONFIG_DIRECTORY, null);
     HdfsUtil.addHdfsResources(conf, confDir);
+    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
     return conf;
   }
@@ -283,7 +284,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
     Configuration conf = getConf();
     FileSystem fileSystem = null;
     try {
-      fileSystem = FileSystem.newInstance(new URI(cacheValue.path), conf);
+      fileSystem = FileSystem.get(new URI(cacheValue.path), conf);
       boolean success = fileSystem.delete(new Path(cacheValue.path), true);
       if (!success) {
         throw new RuntimeException("Could not remove directory");

@@ -60,7 +60,7 @@ public class HdfsDirectory extends BaseDirectory {
     super(lockFactory);
     this.hdfsDirPath = hdfsDirPath;
     this.configuration = configuration;
-    fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), configuration);
+    fileSystem = FileSystem.get(hdfsDirPath.toUri(), configuration);
     fileContext = FileContext.getFileContext(hdfsDirPath.toUri(), configuration);
     while (true) {

@@ -64,7 +64,7 @@ public class HdfsLockFactory extends LockFactory {
     @Override
     public boolean obtain() throws IOException {
       FSDataOutputStream file = null;
-      FileSystem fs = FileSystem.newInstance(lockPath.toUri(), conf);
+      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
       try {
         while (true) {
           try {
@@ -111,7 +111,7 @@ public class HdfsLockFactory extends LockFactory {
     @Override
     public void close() throws IOException {
-      FileSystem fs = FileSystem.newInstance(lockPath.toUri(), conf);
+      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
       try {
         if (fs.exists(new Path(lockPath, lockName))
             && !fs.delete(new Path(lockPath, lockName), false)) throw new LockReleaseFailedException(
@@ -124,7 +124,7 @@ public class HdfsLockFactory extends LockFactory {
     @Override
     public boolean isLocked() throws IOException {
       boolean isLocked = false;
-      FileSystem fs = FileSystem.newInstance(lockPath.toUri(), conf);
+      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
       try {
         isLocked = fs.exists(new Path(lockPath, lockName));
       } finally {

@@ -97,7 +97,7 @@ public class HdfsUpdateLog extends UpdateLog {
     if (confDir != null) {
       HdfsUtil.addHdfsResources(conf, confDir);
     }
+    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
     return conf;
   }

@@ -64,7 +64,7 @@ public class HdfsTestUtil {
     conf.set("hadoop.security.authentication", "simple");
     conf.set("hdfs.minidfs.basedir", dir + File.separator + "hdfsBaseDir");
     conf.set("dfs.namenode.name.dir", dir + File.separator + "nameNodeNameDir");
+    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
     System.setProperty("test.build.data", dir + File.separator + "hdfs" + File.separator + "build");
     System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");

@@ -215,7 +215,8 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     // check that all dirs are gone
     for (String dataDir : dataDirs) {
       Configuration conf = new Configuration();
-      FileSystem fs = FileSystem.newInstance(new URI(dataDir), conf);
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      FileSystem fs = FileSystem.get(new URI(dataDir), conf);
       assertFalse(
           "Data directory exists after collection removal : " + dataDir,
           fs.exists(new Path(dataDir)));

@@ -81,7 +81,9 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     try {
       URI uri = new URI(hdfsUri);
-      fs = FileSystem.newInstance(uri, new Configuration());
+      Configuration conf = new Configuration();
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      fs = FileSystem.get(uri, conf);
     } catch (IOException | URISyntaxException e) {
       throw new RuntimeException(e);
     }