timers = new HashMap<>();
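+ // Protects lazy creation and cleanup of the timers map shared across HDFS test classes.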
+ private static final Object TIMERS_LOCK = new Object();
private static FSDataOutputStream badTlogOutStream;
@@ -145,7 +146,12 @@ public class HdfsTestUtil {
int rnd = random().nextInt(10000);
Timer timer = new Timer();
- timers.put(dfsCluster, timer);
+ synchronized (TIMERS_LOCK) {
+ if (timers == null) {
+ timers = new HashMap<>();
+ }
+ timers.put(dfsCluster, timer);
+ }
timer.schedule(new TimerTask() {
@Override
@@ -156,7 +162,12 @@ public class HdfsTestUtil {
} else if (haTesting && rndMode == 2) {
int rnd = random().nextInt(30000);
Timer timer = new Timer();
- timers.put(dfsCluster, timer);
+ synchronized (TIMERS_LOCK) {
+ if (timers == null) {
+ timers = new HashMap<>();
+ }
+ timers.put(dfsCluster, timer);
+ }
timer.schedule(new TimerTask() {
@Override
@@ -196,19 +207,23 @@ public class HdfsTestUtil {
public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) {
Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0));
- if (dfsCluster.getNameNodeInfos().length > 1) {
+ if (dfsCluster.getNumNameNodes() > 1) {
HATestUtil.setFailoverConfigurations(dfsCluster, conf);
}
return conf;
}
public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
+ HdfsUtil.TEST_CONF = null;
+
if (badTlogOutStream != null) {
IOUtils.closeQuietly(badTlogOutStream);
+ badTlogOutStream = null;
}
if (badTlogOutStreamFs != null) {
IOUtils.closeQuietly(badTlogOutStreamFs);
+ badTlogOutStreamFs = null;
}
try {
@@ -218,9 +233,16 @@ public class HdfsTestUtil {
log.error("Exception trying to reset solr.directoryFactory", e);
}
if (dfsCluster != null) {
- Timer timer = timers.remove(dfsCluster);
- if (timer != null) {
- timer.cancel();
+ synchronized (TIMERS_LOCK) {
+ if (timers != null) {
+ Timer timer = timers.remove(dfsCluster);
+ if (timer != null) {
+ timer.cancel();
+ }
+ if (timers.isEmpty()) {
+ timers = null;
+ }
+ }
}
try {
dfsCluster.shutdown(true);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
index 1f316033785..13531a4c364 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
@@ -73,6 +73,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
dfsCluster = null;
+ schemaString = null;
}
}
diff --git a/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java b/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
index 8eefd9a3662..d31bd820be8 100644
--- a/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
+++ b/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
@@ -71,6 +71,7 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
dfsCluster = null;
+ path = null;
}
}
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
index e19895e9e53..a148d59599f 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
@@ -65,10 +65,11 @@ import static org.apache.solr.update.processor.DistributingUpdateProcessorFactor
// TODO: longer term this should be combined with TestRecovery somehow ??
public class TestRecoveryHdfs extends SolrTestCaseJ4 {
// means that we've seen the leader and have version info (i.e. we are a non-leader replica)
- private static String FROM_LEADER = DistribPhase.FROMLEADER.toString();
+ private static final String FROM_LEADER = DistribPhase.FROMLEADER.toString();
+
+ // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing.
+ private static final int TIMEOUT = 60;
- private static int timeout=60; // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing.
-
private static MiniDFSCluster dfsCluster;
private static String hdfsUri;
private static FileSystem fs;
@@ -102,7 +103,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
dfsCluster = null;
- hdfsDataDir = null;
+ hdfsUri = null;
System.clearProperty("solr.ulog.dir");
System.clearProperty("test.build.data");
System.clearProperty("test.cache.data");
@@ -142,7 +143,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -183,7 +184,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions);
// wait until recovery has finished
- assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
assertJQ(req("q","*:*") ,"/response/numFound==3");
@@ -203,7 +204,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
// h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
// wait until recovery has finished
- assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
assertJQ(req("q","*:*") ,"/response/numFound==5");
assertJQ(req("q","id:A2") ,"/response/numFound==0");
@@ -235,7 +236,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -386,7 +387,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -606,7 +607,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -661,7 +662,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -718,7 +719,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
logReplay.release(1000);
- assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
@@ -768,7 +769,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -802,7 +803,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
logReplayFinish.drainPermits();
ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change.
createCore();
- assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
resetExceptionIgnores();
assertJQ(req("q","*:*") ,"/response/numFound==3");
@@ -896,7 +897,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
UpdateLog.testing_logReplayHook = () -> {
try {
- assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -948,7 +949,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
logReplayFinish.drainPermits();
ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change.
createCore();
- assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+ assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
resetExceptionIgnores();
assertJQ(req("q","*:*") ,"/response/numFound==6");
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
index 5e72b522204..12269262a3f 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
@@ -62,8 +62,11 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
@AfterClass
public static void afterClass() throws Exception {
- HdfsTestUtil.teardownClass(dfsCluster);
- dfsCluster = null;
+ try {
+ HdfsTestUtil.teardownClass(dfsCluster);
+ } finally {
+ dfsCluster = null;
+ }
}
@Before
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
index 7a232408891..bf00016d164 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
@@ -48,8 +48,11 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
@AfterClass
public static void afterClass() throws Exception {
- HdfsTestUtil.teardownClass(dfsCluster);
- dfsCluster = null;
+ try {
+ HdfsTestUtil.teardownClass(dfsCluster);
+ } finally {
+ dfsCluster = null;
+ }
}
@Test
diff --git a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
index 5ba7f58ec14..b9a0158b0ee 100644
--- a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
+++ b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
@@ -70,8 +70,8 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
try {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
- hdfsDataDir = null;
dfsCluster = null;
+ hdfsUri = null;
System.clearProperty("solr.ulog.dir");
System.clearProperty("test.build.data");
System.clearProperty("test.cache.data");
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 26709518276..ac8deaaf35f 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -331,6 +331,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
if (null != testExecutor) {
ExecutorUtil.shutdownAndAwaitTermination(testExecutor);
+ testExecutor = null;
}
resetExceptionIgnores();
@@ -489,6 +490,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
changedFactory = false;
if (savedFactory != null) {
System.setProperty("solr.directoryFactory", savedFactory);
+ savedFactory = null;
} else {
System.clearProperty("solr.directoryFactory");
}
@@ -895,6 +897,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
lrf = null;
configString = schemaString = null;
initCoreDataDir = null;
+ hdfsDataDir = null;
}
/** Validates an update XML String is successful
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index b8e0798dc0e..fccb4abdeec 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -275,9 +275,12 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
@AfterClass
public static void shutdownCluster() throws Exception {
if (cluster != null) {
- cluster.shutdown();
+ try {
+ cluster.shutdown();
+ } finally {
+ cluster = null;
+ }
}
- cluster = null;
}
@Before
diff --git a/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java b/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java
index add572a2495..056f4f7a88f 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java
@@ -20,8 +20,7 @@ import java.security.AccessController;
import java.security.PrivilegedAction;
/**
- * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)},
- * and implements some hacks for hadoop.
+ * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}.
* Only the test runner itself is allowed to exit the JVM.
* All other security checks are handled by the default security policy.
*
@@ -43,93 +42,6 @@ public final class SolrSecurityManager extends SecurityManager {
super();
}
- // TODO: move this stuff into a Solr (non-test) SecurityManager!
- /**
- * {@inheritDoc}
- *
- * This method implements hacks to workaround hadoop's garbage Shell and FileUtil code
- */
- @Override
- public void checkExec(String cmd) {
- // NOTE: it would be tempting to just allow anything from hadoop's Shell class, but then
- // that would just give an easy vector for RCE (use hadoop Shell instead of e.g. ProcessBuilder)
- // so we whitelist actual caller impl methods instead.
- for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
- // hadoop insists on shelling out to get the user's supplementary groups?
- if ("org.apache.hadoop.security.ShellBasedUnixGroupsMapping".equals(element.getClassName()) &&
- "getGroups".equals(element.getMethodName())) {
- return;
- }
- // hadoop insists on shelling out to parse 'df' command instead of using FileStore?
- if ("org.apache.hadoop.fs.DF".equals(element.getClassName()) &&
- "getFilesystem".equals(element.getMethodName())) {
- return;
- }
- // hadoop insists on shelling out to parse 'du' command instead of using FileStore?
- if ("org.apache.hadoop.fs.DU".equals(element.getClassName()) &&
- "refresh".equals(element.getMethodName())) {
- return;
- }
- // hadoop insists on shelling out to parse 'ls' command instead of java nio apis?
- if ("org.apache.hadoop.util.DiskChecker".equals(element.getClassName()) &&
- "checkDir".equals(element.getMethodName())) {
- return;
- }
- // hadoop insists on shelling out to parse 'stat' command instead of Files.getAttributes?
- if ("org.apache.hadoop.fs.HardLink".equals(element.getClassName()) &&
- "getLinkCount".equals(element.getMethodName())) {
- return;
- }
- // hadoop "canExecute" method doesn't handle securityexception and fails completely.
- // so, lie to it, and tell it we will happily execute, so it does not crash.
- if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
- "canExecute".equals(element.getMethodName())) {
- return;
- }
- }
- super.checkExec(cmd);
- }
-
- /**
- * {@inheritDoc}
- *
- * This method implements hacks to workaround hadoop's garbage FileUtil code
- */
- @Override
- public void checkWrite(String file) {
- for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
- // hadoop "canWrite" method doesn't handle securityexception and fails completely.
- // so, lie to it, and tell it we will happily write, so it does not crash.
- if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
- "canWrite".equals(element.getMethodName())) {
- return;
- }
- }
- super.checkWrite(file);
- }
-
- /**
- * {@inheritDoc}
- *
- * This method implements hacks to workaround hadoop's garbage FileUtil code
- */
- @Override
- public void checkRead(String file) {
- for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
- // hadoop "createPermissionsDiagnosisString" method doesn't handle securityexception and fails completely.
- // it insists on climbing up full directory tree!
- // so, lie to it, and tell it we will happily read, so it does not crash.
- if ("org.apache.hadoop.hdfs.MiniDFSCluster".equals(element.getClassName()) &&
- "createPermissionsDiagnosisString".equals(element.getMethodName())) {
- return;
- }
- // hadoop "canRead" method doesn't handle securityexception and fails completely.
- // so, lie to it, and tell it we will happily read, so it does not crash.
- if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
- "canRead".equals(element.getMethodName())) {
- return;
- }
- }
- super.checkRead(file);
- }
-
/**
* {@inheritDoc}
*
* This method inspects the stack trace and checks who is calling