SOLR-13307: Ensure HDFS tests clear System properties they set (Kevin Risden)

Signed-off-by: Kevin Risden <krisden@apache.org>
Kevin Risden 2019-03-08 08:30:46 -05:00
parent d8cfeba912
commit 6777af074f
21 changed files with 269 additions and 286 deletions
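Most of the diffs below apply one pattern: the @AfterClass teardown is wrapped in nested try/finally blocks so that an exception while shutting down the MiniDFSCluster cannot skip the rest of the cleanup, and every System.setProperty made in setupClass gets a matching System.clearProperty in the innermost finally. A minimal sketch of the pattern (HdfsExampleTest and the property chosen are illustrative, not one of the files changed here):

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.solr.SolrTestCaseJ4;
    import org.apache.solr.cloud.hdfs.HdfsTestUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class HdfsExampleTest extends SolrTestCaseJ4 {
      private static MiniDFSCluster dfsCluster;

      @BeforeClass
      public static void setupClass() throws Exception {
        // choose property values before creating the cluster
        System.setProperty("solr.hdfs.blockcache.global", "true");
        dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
      }

      @AfterClass
      public static void teardownClass() throws Exception {
        try {
          HdfsTestUtil.teardownClass(dfsCluster); // may throw; must not skip the cleanup below
        } finally {
          dfsCluster = null;
          System.clearProperty("solr.hdfs.blockcache.global"); // mirror every setProperty
        }
      }
    }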


@@ -145,6 +145,8 @@ Other Changes
 * SOLR-13268: Clean up any test failures resulting from defaulting to async logging (Erick Erickson)

+* SOLR-13307: Ensure HDFS tests clear System properties they set (Kevin Risden)
+
 ================== 8.0.0 ==================
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


@@ -63,9 +63,17 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.home");
+        System.clearProperty("solr.hdfs.blockcache.enabled");
+      }
+    }
   }

   @Test
@@ -204,5 +212,4 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
       solrClient.add(collection, doc);
     }
   }
-
 }


@@ -50,8 +50,8 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
-      System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
-      System.setProperty("tests.hdfs.numdatanodes", "1");
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+      System.clearProperty("tests.hdfs.numdatanodes");
     }
   }


@@ -147,14 +147,17 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
   @AfterClass
   public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty("solr.hdfs.default.backup.path");
+      System.clearProperty("test.build.data");
+      System.clearProperty("test.cache.data");
+    }
   }

   @Override
@@ -211,11 +214,10 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
       assertTrue(expected.contains(d));
     }
   }

   @Override
   @Test
-  // commented 15-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
   public void test() throws Exception {
     super.test();
   }
 }


@@ -38,6 +38,7 @@ public class HdfsAutoAddReplicasIntegrationTest extends AutoAddReplicasIntegrationTest {
   @BeforeClass
   public static void setupClass() throws Exception {
     System.setProperty("solr.hdfs.blockcache.global", "true");
+    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
     System.setProperty("tests.hdfs.numdatanodes", "1");
     dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
@@ -49,6 +50,7 @@ public class HdfsAutoAddReplicasIntegrationTest extends AutoAddReplicasIntegrationTest {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.global");
       System.clearProperty("solr.hdfs.blockcache.blocksperbank");
       System.clearProperty("tests.hdfs.numdatanodes");
     }


@@ -55,9 +55,17 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster(); // need to close before the MiniDFSCluster
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.blockcache.enabled");
+        System.clearProperty("solr.hdfs.home");
+      }
+    }
   }

   public void testDataDirIsNotReused() throws Exception {


@@ -28,13 +28,11 @@ import org.junit.BeforeClass;

 import com.carrotsearch.randomizedtesting.annotations.Nightly;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;

 @Slow
 @Nightly
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented 20-July-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
   private static MiniDFSCluster dfsCluster;
@@ -51,15 +49,16 @@ public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    System.clearProperty("tests.hdfs.numdatanodes");
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("tests.hdfs.numdatanodes");
+    }
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -33,20 +33,23 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028, https://issues.apache.org/jira/browse/SOLR-10191")
 public class HdfsChaosMonkeyNothingIsSafeTest extends ChaosMonkeyNothingIsSafeTest {
   private static MiniDFSCluster dfsCluster;

   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     System.setProperty("solr.hdfs.blockcache.global", "true"); // always use global cache, this test can create a lot of directories
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
   }

   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.global");
+    }
   }

   @Override
@@ -56,12 +59,9 @@ public class HdfsChaosMonkeyNothingIsSafeTest extends ChaosMonkeyNothingIsSafeTest {
     // super class may hard code directory
     useFactory("org.apache.solr.core.HdfsDirectoryFactory");
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -33,7 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
 public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
   private static MiniDFSCluster dfsCluster;
@@ -45,8 +44,12 @@ public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.global");
+    }
   }

   @Override
@@ -56,12 +59,9 @@ public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
     // super class may hard code directory
     useFactory("org.apache.solr.core.HdfsDirectoryFactory");
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -35,10 +35,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 })
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsNNFailoverTest extends BasicDistributedZkTest {
-  private static final String COLLECTION = "collection";
   private static MiniDFSCluster dfsCluster;

   @BeforeClass
   public static void setupClass() throws Exception {
@@ -47,8 +45,11 @@ public class HdfsNNFailoverTest extends BasicDistributedZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }

   @Override
@@ -61,7 +62,7 @@ public class HdfsNNFailoverTest extends BasicDistributedZkTest {
     sliceCount = 1;
     fixShardCount(TEST_NIGHTLY ? 7 : random().nextInt(2) + 1);
   }

   protected String getSolrXml() {
     return "solr.xml";
   }


@@ -51,9 +51,17 @@ public class HdfsRecoveryZkTest extends RecoveryZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster(); // need to close before the MiniDFSCluster
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+        System.clearProperty("solr.hdfs.home");
+      }
+    }
   }
 }


@@ -33,9 +33,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// 12-Jun-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") //2018-03-10
 public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {

   public HdfsRestartWhileUpdatingTest() throws Exception {
     super();
   }
@@ -50,14 +48,16 @@ public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -113,7 +113,8 @@ public class HdfsTestUtil {
     System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");
     System.setProperty("solr.lock.type", DirectoryFactory.LOCK_TYPE_HDFS);

-    System.setProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean()));
+    System.setProperty("solr.hdfs.blockcache.global",
+        System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean())));

     final MiniDFSCluster dfsCluster;
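The HdfsTestUtil change above turns the random block-cache choice into a fallback rather than an override: System.getProperty(key, def) returns def only when key is unset, so a value that a test sets before calling HdfsTestUtil.setupClass(...) now survives. That is presumably why several hunks in this commit move the setupClass call after the setProperty calls. A small standalone fragment illustrating the idiom:

    // keep a value the test already chose; otherwise fall back to a random one
    String global = System.getProperty("solr.hdfs.blockcache.global",
        Boolean.toString(LuceneTestCase.random().nextBoolean()));
    System.setProperty("solr.hdfs.blockcache.global", global);
    // clearing the property in teardown restores randomization for later tests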


@@ -30,35 +30,34 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;

 @Slow
 @Nightly
-// 12-Jun-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedWhileIndexingTest {

-  private static MiniDFSCluster dfsCluster;
-
   public HdfsTlogReplayBufferedWhileIndexingTest() throws Exception {
     super();
   }

+  private static MiniDFSCluster dfsCluster;
+
   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
   }

   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -33,7 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-//Commented 4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
 public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
   private static MiniDFSCluster dfsCluster;
@@ -44,14 +43,15 @@ public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }

   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -57,10 +57,7 @@ import org.junit.Test;

 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-//Commented 4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
-  private static final String SOLR_HDFS_HOME = "solr.hdfs.home";
-  private static final String SOLR_HDFS_BLOCKCACHE_GLOBAL = "solr.hdfs.blockcache.global";
   private static final String ACOLLECTION = "acollection";
   private static MiniDFSCluster dfsCluster;
@@ -72,8 +69,11 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }

   @Override
@@ -168,7 +168,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
           BlockCache blockCache = ((BlockDirectoryCache) cache)
               .getBlockCache();
           if (lastBlockCache != null) {
-            if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
+            if (Boolean.getBoolean("solr.hdfs.blockcache.global")) {
               assertEquals(lastBlockCache, blockCache);
             } else {
               assertNotSame(lastBlockCache, blockCache);


@@ -24,11 +24,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.BasicDistributedZkTest;
@@ -42,14 +40,12 @@ import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.BadHdfsThreadsFilter;
 import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;

 import java.io.IOException;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -58,16 +54,13 @@ import java.util.TimerTask;
 import java.util.concurrent.TimeUnit;

 @Slow
-@Nightly
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
+@Nightly
 public class StressHdfsTest extends BasicDistributedZkTest {

   private static final String DELETE_DATA_DIR_COLLECTION = "delete_data_dir";
   private static MiniDFSCluster dfsCluster;

   private boolean testRestartIntoSafeMode;
@@ -78,8 +71,11 @@ public class StressHdfsTest extends BasicDistributedZkTest {
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }

   @Override
@@ -93,13 +89,12 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     fixShardCount(TEST_NIGHTLY ? 7 : random().nextInt(2) + 1);
     testRestartIntoSafeMode = random().nextBoolean();
   }

   protected String getSolrXml() {
     return "solr.xml";
   }

   @Test
-  //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void test() throws Exception {
     randomlyEnableAutoSoftCommit();
@@ -121,7 +116,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
       // enter safe mode and restart a node
       NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);
-      int rnd = LuceneTestCase.random().nextInt(10000);
+      int rnd = random().nextInt(10000);
       timer.schedule(new TimerTask() {
@@ -140,10 +135,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     }
   }

-  private void createAndDeleteCollection() throws SolrServerException,
-      IOException, Exception, KeeperException, InterruptedException,
-      URISyntaxException {
+  private void createAndDeleteCollection() throws Exception {
     boolean overshard = random().nextBoolean();
     int rep;
     int nShards;
@@ -238,11 +230,11 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     for (String dataDir : dataDirs) {
       Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
       conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-      FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf);
-      assertFalse(
-          "Data directory exists after collection removal : " + dataDir,
-          fs.exists(new Path(dataDir)));
-      fs.close();
+      try (FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf)) {
+        assertFalse(
+            "Data directory exists after collection removal : " + dataDir,
+            fs.exists(new Path(dataDir)));
+      }
     }
   }
 }
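The last StressHdfsTest hunk also replaces a manually closed FileSystem with try-with-resources, so the handle is released even when the assertion throws; HdfsDirectoryFactoryTest below gets the same treatment for its Directory and IndexOutput handles. A generic sketch (the URI and path are illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true); // ask for a private, uncached FileSystem instance
    try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:8020"), conf)) {
      // fs.close() runs automatically, even if this assertion fails
      assertFalse(fs.exists(new Path("/solr/removed-collection/data")));
    }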


@@ -51,105 +51,96 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {

   private static MiniDFSCluster dfsCluster;

   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false);
     System.setProperty("solr.hdfs.blockcache.blocksperbank", "1024");
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false);
   }

   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
-    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }

   @Test
   public void testInitArgsOrSysPropConfig() throws Exception {
-    HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory();
-    // test sys prop config
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
-    hdfsFactory.init(new NamedList<>());
-    String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-    assertTrue(dataHome.endsWith("/solr1/mock/data"));
-    System.clearProperty("solr.hdfs.home");
-    // test init args config
-    NamedList<Object> nl = new NamedList<>();
-    nl.add("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr2");
-    hdfsFactory.init(nl);
-    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-    assertTrue(dataHome.endsWith("/solr2/mock/data"));
-    // test sys prop and init args config - init args wins
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
-    hdfsFactory.init(nl);
-    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-    assertTrue(dataHome.endsWith("/solr2/mock/data"));
-    System.clearProperty("solr.hdfs.home");
-    // set conf dir by sys prop
-    Path confDir = createTempDir();
-    System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString());
-    Directory dir = hdfsFactory.create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT);
-    try {
-      assertEquals(confDir.toString(), hdfsFactory.getConfDir());
-    } finally {
-      dir.close();
-    }
-    // check bool and int getConf impls
-    nl = new NamedList<>();
-    nl.add(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 4);
-    System.setProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, "3");
-    nl.add(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, true);
-    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
-    hdfsFactory.init(nl);
-    assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(true, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    nl = new NamedList<>();
-    hdfsFactory.init(nl);
-    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true");
-    assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(true, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
-    System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);
-    assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(false, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    hdfsFactory.close();
+    try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
+      // test sys prop config
+      System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
+      hdfsFactory.init(new NamedList<>());
+      String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+      assertTrue(dataHome.endsWith("/solr1/mock/data"));
+      System.clearProperty("solr.hdfs.home");
+      // test init args config
+      NamedList<Object> nl = new NamedList<>();
+      nl.add("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr2");
+      hdfsFactory.init(nl);
+      dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+      assertTrue(dataHome.endsWith("/solr2/mock/data"));
+      // test sys prop and init args config - init args wins
+      System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
+      hdfsFactory.init(nl);
+      dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+      assertTrue(dataHome.endsWith("/solr2/mock/data"));
+      System.clearProperty("solr.hdfs.home");
+      // set conf dir by sys prop
+      Path confDir = createTempDir();
+      System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString());
+      try (Directory dir = hdfsFactory
+          .create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
+        assertEquals(confDir.toString(), hdfsFactory.getConfDir());
+      }
+      // check bool and int getConf impls
+      nl = new NamedList<>();
+      nl.add(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 4);
+      System.setProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, "3");
+      nl.add(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, true);
+      System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
+      hdfsFactory.init(nl);
+      assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
+      nl = new NamedList<>();
+      hdfsFactory.init(nl);
+      System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true");
+      assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
+      System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
+      System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);
+      assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertFalse(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
+    }
   }

   @Test
   public void testCleanupOldIndexDirectories() throws Exception {
     try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
       System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
       hdfsFactory.init(new NamedList<>());
       String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
@@ -183,56 +174,56 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
     conf.set("dfs.permissions.enabled", "false");

     Random r = random();
-    HdfsDirectoryFactory factory = new HdfsDirectoryFactory();
-    SolrMetricManager metricManager = new SolrMetricManager();
-    String registry = TestUtil.randomSimpleString(r, 2, 10);
-    String scope = TestUtil.randomSimpleString(r,2, 10);
-    Map<String,String> props = new HashMap<String,String>();
-    props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
-    props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
-    props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
-    props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
-    factory.init(new NamedList<>(props));
-    factory.initializeMetrics(metricManager, registry, "foo", scope);
-    // get the metrics map for the locality bean
-    MetricsMap metrics = (MetricsMap)((SolrMetricManager.GaugeWrapper)metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
-    // We haven't done anything, so there should be no data
-    Map<String,Object> statistics = metrics.getValue();
-    assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l,
-        statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
-    assertEquals(
-        "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0,
-        statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));
-    // create a directory and a file
-    String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
-    Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT);
-    try(IndexOutput writer = dir.createOutput("output", null)) {
-      writer.writeLong(42l);
-    }
-    final long long_bytes = Long.SIZE / Byte.SIZE;
-    // no locality because hostname not set
-    factory.setHost("bogus");
-    statistics = metrics.getValue();
-    assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
-        long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
-    assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL),
-        1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
-    assertEquals(
-        "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL),
-        0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));
-    // set hostname and check again
-    factory.setHost("127.0.0.1");
-    statistics = metrics.getValue();
-    assertEquals(
-        "Did not count block as local after setting hostname: "
-            + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL),
-        long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
-    factory.close();
+    try (HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) {
+      SolrMetricManager metricManager = new SolrMetricManager();
+      String registry = TestUtil.randomSimpleString(r, 2, 10);
+      String scope = TestUtil.randomSimpleString(r, 2, 10);
+      Map<String, String> props = new HashMap<String, String>();
+      props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
+      props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
+      props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
+      props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
+      factory.init(new NamedList<>(props));
+      factory.initializeMetrics(metricManager, registry, "foo", scope);

+      // get the metrics map for the locality bean
+      MetricsMap metrics = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
+      // We haven't done anything, so there should be no data
+      Map<String, Object> statistics = metrics.getValue();
+      assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l,
+          statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
+      assertEquals(
+          "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0,
+          statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));

+      // create a directory and a file
+      String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
+      try (Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
+        try (IndexOutput writer = dir.createOutput("output", null)) {
+          writer.writeLong(42L);
+        }
+        final long long_bytes = Long.SIZE / Byte.SIZE;

+        // no locality because hostname not set
+        factory.setHost("bogus");
+        statistics = metrics.getValue();
+        assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
+            long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
+        assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL),
+            1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
+        assertEquals(
+            "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL),
+            0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));

+        // set hostname and check again
+        factory.setHost("127.0.0.1");
+        statistics = metrics.getValue();
+        assertEquals(
+            "Did not count block as local after setting hostname: "
+                + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL),
+            long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
+      }
+    }
   }
 }


@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.handler;

 import java.io.IOException;
@@ -145,14 +144,17 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
   @AfterClass
   public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty("solr.hdfs.default.backup.path");
+      System.clearProperty("test.build.data");
+      System.clearProperty("test.cache.data");
+    }
   }

   @Test


@@ -16,7 +16,6 @@
  */
 package org.apache.solr.search;

-
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;

 import java.io.IOException;
@@ -65,18 +64,14 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 // TODO: longer term this should be combined with TestRecovery somehow ??
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class TestRecoveryHdfs extends SolrTestCaseJ4 {

   // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
   private static String FROM_LEADER = DistribPhase.FROMLEADER.toString();

   private static int timeout=60;  // acquire timeout in seconds.  change this to a huge number when debugging to prevent threads from advancing.

   private static MiniDFSCluster dfsCluster;
   private static String hdfsUri;
   private static FileSystem fs;

   @BeforeClass
@@ -100,16 +95,21 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   @AfterClass
   public static void afterClass() throws Exception {
-    System.clearProperty("solr.ulog.dir");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    deleteCore();
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    hdfsDataDir = null;
-    dfsCluster = null;
+    try {
+      deleteCore();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        hdfsDataDir = null;
+        System.clearProperty("solr.ulog.dir");
+        System.clearProperty("test.build.data");
+        System.clearProperty("test.cache.data");
+      }
+    }
   }

   @Test
@@ -135,11 +135,9 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertTrue("Expected to find tlogs with a replication factor of 2", foundRep2);
   }

   @Test
   public void testLogReplay() throws Exception {
     try {
       DirectUpdateHandler2.commitOnClose = false;
       final Semaphore logReplay = new Semaphore(0);
       final Semaphore logReplayFinish = new Semaphore(0);
@@ -154,7 +152,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;

       clearIndex();
       assertU(commit());
@@ -230,12 +227,10 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       UpdateLog.testing_logReplayHook = null;
       UpdateLog.testing_logReplayFinishHook = null;
     }
   }

   @Test
   public void testBuffering() throws Exception {
     DirectUpdateHandler2.commitOnClose = false;
     final Semaphore logReplay = new Semaphore(0);
     final Semaphore logReplayFinish = new Semaphore(0);
@@ -250,7 +245,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;

     SolrQueryRequest req = req();
     UpdateHandler uhandler = req.getCore().getUpdateHandler();
     UpdateLog ulog = uhandler.getUpdateLog();
@@ -383,14 +377,11 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       req().close();
     }
   }

   @Test
   @Ignore("HDFS-3107: no truncate support yet")
   public void testDropBuffered() throws Exception {
     DirectUpdateHandler2.commitOnClose = false;
     final Semaphore logReplay = new Semaphore(0);
     final Semaphore logReplayFinish = new Semaphore(0);
@@ -499,12 +490,8 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
             +"]"
         );

         updateJ(jsonAdd(sdoc("id","C2", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));

         assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
       } finally {
         DirectUpdateHandler2.commitOnClose = true;
@@ -513,13 +500,10 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       req().close();
     }
   }

   @Test
   public void testExistOldBufferLog() throws Exception {
     DirectUpdateHandler2.commitOnClose = false;
     SolrQueryRequest req = req();
@@ -587,11 +571,8 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       req().close();
     }
   }

   // make sure that on a restart, versions don't start too low
   @Test
   public void testVersionsOnRestart() throws Exception {
@@ -616,7 +597,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertJQ(req("qt","/get", "getVersions","2")
         ,"/versions==[" + v2 + "," + v1a + "]"
     );
   }

   // make sure that log isn't needlessly replayed after a clean close
@@ -636,7 +616,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();

     SolrQueryRequest req = req();
     UpdateHandler uhandler = req.getCore().getUpdateHandler();
     UpdateLog ulog = uhandler.getUpdateLog();
@@ -669,7 +648,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }

   private void addDocs(int nDocs, int start, LinkedList<Long> versions) throws Exception {
     for (int i=0; i<nDocs; i++) {
       versions.addFirst( addAndGetVersion( sdoc("id",Integer.toString(start + nDocs)) , null) );
@@ -783,7 +761,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   // test that a partially written last tlog entry (that will cause problems for both reverse reading and for
   // log replay) doesn't stop us from coming up, and from recovering the documents that were not cut off.
   //
   @Test
   public void testTruncatedLog() throws Exception {
     try {
@@ -850,7 +827,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }

   //
   // test that a corrupt tlog doesn't stop us from coming up
   //
@@ -912,8 +888,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }

   // in rare circumstances, two logs can be left uncapped (lacking a commit at the end signifying that all the content in the log was committed)
   @Test
   public void testRecoveryMultipleLogs() throws Exception {


@@ -37,13 +37,9 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
 public class TestHdfsUpdateLog extends SolrTestCaseJ4 {

   private static MiniDFSCluster dfsCluster;
   private static String hdfsUri;
   private static FileSystem fs;

   @BeforeClass
@@ -69,22 +65,25 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
   @AfterClass
   public static void afterClass() throws Exception {
-    System.clearProperty("solr.ulog.dir");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    deleteCore();
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    hdfsDataDir = null;
-    dfsCluster = null;
+    try {
+      deleteCore();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        hdfsDataDir = null;
+        dfsCluster = null;
+        System.clearProperty("solr.ulog.dir");
+        System.clearProperty("test.build.data");
+        System.clearProperty("test.cache.data");
+      }
+    }
   }

   @Test
-  //28-June-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void testFSThreadSafety() throws Exception {

     final SolrQueryRequest req = req();
     final UpdateHandler uhandler = req.getCore().getUpdateHandler();
     ((DirectUpdateHandler2) uhandler).getCommitTracker().setTimeUpperBound(100);
@@ -131,15 +130,11 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
       }
     };

     thread.start();
     thread2.start();

     thread.join();
     thread2.join();
   }
 }