mirror of https://github.com/apache/lucene.git
SOLR-13330: Improve HDFS tests
Related JIRAs:
* SOLR-11010
* SOLR-11381
* SOLR-12040
* SOLR-13297

Changes:
* Consolidate hdfs configuration into HdfsTestUtil
* Ensure socketTimeout long enough for HDFS tests
* Ensure HdfsTestUtil.getClientConfiguration used in tests
* Replace deprecated HDFS calls
* Use try-with-resources to ensure closing of HDFS resources

Signed-off-by: Kevin Risden <krisden@apache.org>
This commit is contained in:
parent 3ac07b8dfb · commit cf828163bd
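The diff below converges the HDFS tests on a common pattern: obtain the client Configuration from HdfsTestUtil.getClientConfiguration and close HDFS resources with try-with-resources. The following sketch is illustrative only and not part of the commit; it assumes Solr's test classpath (HdfsTestUtil), a running MiniDFSCluster, and a hypothetical helper name writeMarkerFile.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.solr.cloud.hdfs.HdfsTestUtil;

public class HdfsTestPatternSketch {
  // Hypothetical helper: writes a single byte to HDFS and guarantees the
  // FileSystem and output stream are closed, mirroring the try-with-resources
  // usage introduced across the tests in this commit.
  static void writeMarkerFile(MiniDFSCluster dfsCluster, String hdfsUri) throws Exception {
    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
    try (FileSystem fs = FileSystem.get(new URI(hdfsUri), conf);
         FSDataOutputStream out = fs.create(new Path(hdfsUri + "/testfile"))) {
      out.write(5);  // write one byte
      out.hflush();  // flush to the datanodes before closing
    } // both resources are closed here, even if an exception is thrown
  }
}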
@@ -158,6 +158,8 @@ Other Changes

* SOLR-13307: Ensure HDFS tests clear System properties they set (Kevin Risden)

* SOLR-13330: Improve HDFS tests (Kevin Risden)

================== 8.0.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@@ -531,7 +531,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
boolean accept = false;
String pathName = path.getName();
try {
accept = fs.isDirectory(path) && !path.equals(currentIndexDirPath) &&
accept = fs.getFileStatus(path).isDirectory() && !path.equals(currentIndexDirPath) &&
(pathName.equals("index") || pathName.matches(INDEX_W_TIMESTAMP_REGEX));
} catch (IOException e) {
log.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
@@ -49,8 +49,8 @@
<bool name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</bool>
<bool name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</bool>
<bool name="solr.hdfs.blockcache.write.enabled">${solr.hdfs.blockcache.write.enabled:false}</bool>
<int name="solr.hdfs.blockcache.blocksperbank">10</int>
<int name="solr.hdfs.blockcache.slab.count">1</int>
<int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:10}</int>
<int name="solr.hdfs.blockcache.slab.count">${solr.hdfs.blockcache.slab.count:1}</int>
</directoryFactory>

<schemaFactory class="ClassicIndexSchemaFactory"/>

@@ -49,8 +49,8 @@
<bool name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</bool>
<bool name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</bool>
<bool name="solr.hdfs.blockcache.write.enabled">${solr.hdfs.blockcache.write.enabled:false}</bool>
<int name="solr.hdfs.blockcache.blocksperbank">10</int>
<int name="solr.hdfs.blockcache.slab.count">1</int>
<int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:10}</int>
<int name="solr.hdfs.blockcache.slab.count">${solr.hdfs.blockcache.slab.count:1}</int>
</directoryFactory>

<schemaFactory class="ClassicIndexSchemaFactory"/>
@@ -52,13 +52,10 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
.configure();

System.setProperty("solr.hdfs.blockcache.enabled", "false");
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());

ZkConfigManager configManager = new ZkConfigManager(zkClient());
configManager.uploadConfigDir(configset("cloud-hdfs"), "conf1");

System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
}

@AfterClass

@@ -70,8 +67,6 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
dfsCluster = null;
System.clearProperty("solr.hdfs.home");
System.clearProperty("solr.hdfs.blockcache.enabled");
}
}
}
@@ -39,8 +39,6 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {

@BeforeClass
public static void setupClass() throws Exception {
System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
System.setProperty("tests.hdfs.numdatanodes", "1");
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
}

@@ -50,8 +48,6 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {
HdfsTestUtil.teardownClass(dfsCluster);
} finally {
dfsCluster = null;
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
System.clearProperty("tests.hdfs.numdatanodes");
}
}
@ -31,7 +31,6 @@ import org.apache.solr.common.cloud.Slice;
|
|||
import org.apache.solr.common.cloud.ZkCoreNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.ExecutorUtil;
|
||||
import org.apache.solr.common.util.TimeSource;
|
||||
import org.apache.solr.core.SolrCore;
|
||||
|
@ -42,6 +41,7 @@ import org.junit.Test;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
@ -54,23 +54,19 @@ import java.util.concurrent.TimeUnit;
|
|||
@Slow
|
||||
@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
|
||||
public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
||||
|
||||
protected String getSolrXml() {
|
||||
return "solr.xml";
|
||||
}
|
||||
|
||||
public UnloadDistributedZkTest() {
|
||||
super();
|
||||
}
|
||||
|
||||
protected String getSolrXml() {
|
||||
return "solr.xml";
|
||||
}
|
||||
|
||||
@Test
|
||||
//28-June-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
|
||||
// commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
|
||||
public void test() throws Exception {
|
||||
|
||||
testCoreUnloadAndLeaders(); // long
|
||||
testUnloadLotsOfCores(); // long
|
||||
|
||||
|
||||
testUnloadShardAndCollection();
|
||||
}
|
||||
|
||||
|
@ -78,9 +74,9 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
boolean shouldBePresent, int expectedSliceCount) throws Exception {
|
||||
final TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
|
||||
Boolean isPresent = null; // null meaning "don't know"
|
||||
while (null == isPresent || shouldBePresent != isPresent.booleanValue()) {
|
||||
while (null == isPresent || shouldBePresent != isPresent) {
|
||||
final DocCollection docCollection = getCommonCloudSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName);
|
||||
final Collection<Slice> slices = (docCollection != null) ? docCollection.getSlices() : null;
|
||||
final Collection<Slice> slices = (docCollection != null) ? docCollection.getSlices() : Collections.emptyList();
|
||||
if (timeout.hasTimedOut()) {
|
||||
printLayout();
|
||||
fail("checkCoreNamePresenceAndSliceCount failed:"
|
||||
|
@ -88,14 +84,12 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
+" shouldBePresent="+shouldBePresent+" isPresent="+isPresent
|
||||
+" expectedSliceCount="+expectedSliceCount+" actualSliceCount="+slices.size());
|
||||
}
|
||||
if (expectedSliceCount == (slices == null ? 0 : slices.size())) {
|
||||
if (expectedSliceCount == slices.size()) {
|
||||
isPresent = false;
|
||||
if (slices != null) {
|
||||
for (Slice slice : slices) {
|
||||
for (Replica replica : slice.getReplicas()) {
|
||||
if (coreName.equals(replica.get("core"))) {
|
||||
isPresent = true;
|
||||
}
|
||||
for (Slice slice : slices) {
|
||||
for (Replica replica : slice.getReplicas()) {
|
||||
if (coreName.equals(replica.get("core"))) {
|
||||
isPresent = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -105,7 +99,6 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
}
|
||||
|
||||
private void testUnloadShardAndCollection() throws Exception{
|
||||
|
||||
final int numShards = 2;
|
||||
|
||||
final String collection = "test_unload_shard_and_collection";
|
||||
|
@ -285,7 +278,6 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
|
||||
Unload unloadCmd = new Unload(false);
|
||||
unloadCmd.setCoreName(leaderProps.getCoreName());
|
||||
SolrParams p = (ModifiableSolrParams) unloadCmd.getParams();
|
||||
collectionClient.request(unloadCmd);
|
||||
}
|
||||
tries = 50;
|
||||
|
@ -335,7 +327,6 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
assertEquals(found1, found3);
|
||||
assertEquals(found3, found4);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void testUnloadLotsOfCores() throws Exception {
|
||||
|
@ -343,7 +334,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
try (final HttpSolrClient adminClient = (HttpSolrClient) jetty.newClient(15000, 60000)) {
|
||||
int numReplicas = atLeast(3);
|
||||
ThreadPoolExecutor executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE,
|
||||
5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
|
||||
5, TimeUnit.SECONDS, new SynchronousQueue<>(),
|
||||
new DefaultSolrThreadFactory("testExecutor"));
|
||||
try {
|
||||
// create the cores
|
||||
|
@ -353,7 +344,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
}
|
||||
|
||||
executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
|
||||
TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
|
||||
TimeUnit.SECONDS, new SynchronousQueue<>(),
|
||||
new DefaultSolrThreadFactory("testExecutor"));
|
||||
try {
|
||||
for (int j = 0; j < numReplicas; j++) {
|
||||
|
@ -374,5 +365,4 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -38,8 +38,6 @@ public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistribut
|
|||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
|
||||
System.setProperty("tests.hdfs.numdatanodes", "1");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
}
|
||||
|
||||
|
@ -49,8 +47,6 @@ public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistribut
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
System.clearProperty("tests.hdfs.numdatanodes");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -112,7 +112,6 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa
|
|||
try {
|
||||
URI uri = new URI(hdfsUri);
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
fs = FileSystem.get(uri, conf);
|
||||
|
||||
if (fs instanceof DistributedFileSystem) {
|
||||
|
|
|
@ -38,9 +38,6 @@ public class HdfsAutoAddReplicasIntegrationTest extends AutoAddReplicasIntegrati
|
|||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
System.setProperty("solr.hdfs.blockcache.global", "true");
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
|
||||
System.setProperty("tests.hdfs.numdatanodes", "1");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
}
|
||||
|
||||
|
@ -50,9 +47,6 @@ public class HdfsAutoAddReplicasIntegrationTest extends AutoAddReplicasIntegrati
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.global");
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
System.clearProperty("tests.hdfs.numdatanodes");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -43,13 +43,10 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
|
|||
configureCluster(2)
|
||||
.configure();
|
||||
|
||||
System.setProperty("solr.hdfs.blockcache.enabled", "false");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
|
||||
ZkConfigManager configManager = new ZkConfigManager(zkClient());
|
||||
configManager.uploadConfigDir(configset("cloud-hdfs"), "conf1");
|
||||
|
||||
System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
|
||||
}
|
||||
|
||||
|
||||
|
@ -62,8 +59,6 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.enabled");
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,7 +38,6 @@ public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
|
|||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
System.setProperty("tests.hdfs.numdatanodes", "1");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
}
|
||||
|
||||
|
@ -53,7 +52,6 @@ public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("tests.hdfs.numdatanodes");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -72,8 +72,7 @@ public class HdfsRecoverLeaseTest extends SolrTestCaseJ4 {

URI uri = dfsCluster.getURI();
Path path = new Path(uri);
Configuration conf = new Configuration();
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
FileSystem fs1 = FileSystem.get(path.toUri(), conf);
Path testFile = new Path(uri.toString() + "/testfile");
FSDataOutputStream out = fs1.create(testFile);

@@ -131,8 +130,7 @@ public class HdfsRecoverLeaseTest extends SolrTestCaseJ4 {

final URI uri = dfsCluster.getURI();
final Path path = new Path(uri);
final Configuration conf = new Configuration();
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
final Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);

// n threads create files
class WriterThread extends Thread {
@ -18,35 +18,30 @@ package org.apache.solr.cloud.hdfs;
|
|||
|
||||
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.LuceneTestCase.BadApple;
|
||||
import org.apache.lucene.util.LuceneTestCase.Nightly;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.cloud.RecoveryZkTest;
|
||||
import org.apache.solr.common.cloud.ZkConfigManager;
|
||||
import org.apache.solr.util.BadHdfsThreadsFilter;
|
||||
import org.apache.solr.util.LogLevel;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
@Slow
|
||||
@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
|
||||
//@Nightly
|
||||
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
|
||||
@Nightly
|
||||
@ThreadLeakFilters(defaultFilters = true, filters = {
|
||||
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
|
||||
})
|
||||
@LogLevel("org.apache.solr.update.HdfsTransactionLog=DEBUG")
|
||||
public class HdfsRecoveryZkTest extends RecoveryZkTest {
|
||||
|
||||
private static MiniDFSCluster dfsCluster;
|
||||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
|
||||
|
||||
ZkConfigManager configManager = new ZkConfigManager(zkClient());
|
||||
configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
|
||||
|
||||
System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -58,10 +53,7 @@ public class HdfsRecoveryZkTest extends RecoveryZkTest {
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
*/
|
||||
package org.apache.solr.cloud.hdfs;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.cloud.RestartWhileUpdatingTest;
|
||||
|
@ -34,16 +32,15 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
|
|||
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
|
||||
})
|
||||
public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
|
||||
private static MiniDFSCluster dfsCluster;
|
||||
|
||||
public HdfsRestartWhileUpdatingTest() throws Exception {
|
||||
super();
|
||||
}
|
||||
|
||||
private static MiniDFSCluster dfsCluster;
|
||||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -52,12 +49,11 @@ public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getDataDir(String dataDir) throws IOException {
|
||||
protected String getDataDir(String dataDir) {
|
||||
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -17,9 +17,9 @@
package org.apache.solr.cloud.hdfs;

import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.URI;
import java.util.Enumeration;
import java.util.Locale;
import java.util.Map;
import java.util.Timer;

@@ -47,6 +47,8 @@ import org.apache.solr.util.HdfsUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.lucene.util.LuceneTestCase.random;

public class HdfsTestUtil {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

@@ -99,49 +101,49 @@ public class HdfsTestUtil {

if (!HA_TESTING_ENABLED) haTesting = false;

int dataNodes = Integer.getInteger("tests.hdfs.numdatanodes", 2);

Configuration conf = new Configuration();
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions.enabled", "false");
conf.set("hadoop.security.authentication", "simple");
Configuration conf = getBasicConfiguration(new Configuration());
conf.set("hdfs.minidfs.basedir", dir + File.separator + "hdfsBaseDir");
conf.set("dfs.namenode.name.dir", dir + File.separator + "nameNodeNameDir");
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
// Disable metrics logging for HDFS
conf.setInt("dfs.namenode.metrics.logger.period.seconds", 0);
conf.setInt("dfs.datanode.metrics.logger.period.seconds", 0);

System.setProperty("test.build.data", dir + File.separator + "hdfs" + File.separator + "build");
System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");
System.setProperty("solr.lock.type", DirectoryFactory.LOCK_TYPE_HDFS);

System.setProperty("solr.hdfs.blockcache.global",
System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean())));
// test-files/solr/solr.xml sets this to be 15000. This isn't long enough for HDFS in some cases.
System.setProperty("socketTimeout", "90000");

final MiniDFSCluster dfsCluster;

if (!haTesting) {
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes).format(true).build();

System.setProperty("solr.hdfs.home", getDataDir(dfsCluster, "solr_hdfs_home"));
String blockcacheGlobal = System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(random().nextBoolean()));
System.setProperty("solr.hdfs.blockcache.global", blockcacheGlobal);
// Limit memory usage for HDFS tests
if(Boolean.parseBoolean(blockcacheGlobal)) {
System.setProperty("solr.hdfs.blockcache.blocksperbank", "4096");
} else {
dfsCluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(dataNodes)
.build();

Configuration haConfig = getClientConfiguration(dfsCluster);

HdfsUtil.TEST_CONF = haConfig;
System.setProperty("solr.hdfs.home", getDataDir(dfsCluster, "solr_hdfs_home"));
System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
System.setProperty("tests.hdfs.numdatanodes", "1");
}

int dataNodes = Integer.getInteger("tests.hdfs.numdatanodes", 2);
final MiniDFSCluster.Builder dfsClusterBuilder = new MiniDFSCluster.Builder(conf)
.numDataNodes(dataNodes).format(true);
if (haTesting) {
dfsClusterBuilder.nnTopology(MiniDFSNNTopology.simpleHATopology());
}
MiniDFSCluster dfsCluster = dfsClusterBuilder.build();
HdfsUtil.TEST_CONF = getClientConfiguration(dfsCluster);
System.setProperty("solr.hdfs.home", getDataDir(dfsCluster, "solr_hdfs_home"));

dfsCluster.waitActive();

if (haTesting) dfsCluster.transitionToActive(0);

int rndMode = LuceneTestCase.random().nextInt(3);
int rndMode = random().nextInt(3);
if (safeModeTesting && rndMode == 1) {
NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);

int rnd = LuceneTestCase.random().nextInt(10000);
int rnd = random().nextInt(10000);
Timer timer = new Timer();
timers.put(dfsCluster, timer);
timer.schedule(new TimerTask() {
@@ -151,9 +153,8 @@ public class HdfsTestUtil {
NameNodeAdapter.leaveSafeMode(dfsCluster.getNameNode());
}
}, rnd);

} else if (haTesting && rndMode == 2) {
int rnd = LuceneTestCase.random().nextInt(30000);
int rnd = random().nextInt(30000);
Timer timer = new Timer();
timers.put(dfsCluster, timer);
timer.schedule(new TimerTask() {

@@ -176,7 +177,7 @@ public class HdfsTestUtil {
URI uri = dfsCluster.getURI();
Path hdfsDirPath = new Path(uri.toString() + "/solr/collection1/core_node1/data/tlog/tlog.0000000000000000000");
// tran log already being created testing
badTlogOutStreamFs = FileSystem.get(hdfsDirPath.toUri(), conf);
badTlogOutStreamFs = FileSystem.get(hdfsDirPath.toUri(), getClientConfiguration(dfsCluster));
badTlogOutStream = badTlogOutStreamFs.create(hdfsDirPath);
}

@@ -185,18 +186,23 @@ public class HdfsTestUtil {
return dfsCluster;
}

private static Configuration getBasicConfiguration(Configuration conf) {
conf.setBoolean("dfs.block.access.token.enable", false);
conf.setBoolean("dfs.permissions.enabled", false);
conf.set("hadoop.security.authentication", "simple");
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
return conf;
}

public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) {
Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0));
if (dfsCluster.getNameNodeInfos().length > 1) {
Configuration conf = new Configuration();
HATestUtil.setFailoverConfigurations(dfsCluster, conf);
return conf;
} else {
return new Configuration();
}
return conf;
}

public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {

if (badTlogOutStream != null) {
IOUtils.closeQuietly(badTlogOutStream);
}
@@ -205,16 +211,19 @@ public class HdfsTestUtil {
IOUtils.closeQuietly(badTlogOutStreamFs);
}

SolrTestCaseJ4.resetFactory();

try {
try {
SolrTestCaseJ4.resetFactory();
} catch (Exception e) {
log.error("Exception trying to reset solr.directoryFactory", e);
}
if (dfsCluster != null) {
Timer timer = timers.remove(dfsCluster);
if (timer != null) {
timer.cancel();
}
try {
dfsCluster.shutdown();
dfsCluster.shutdown(true);
} catch (Error e) {
// Added in SOLR-7134
// Rarely, this can fail to either a NullPointerException

@@ -224,16 +233,27 @@ public class HdfsTestUtil {
}
}
} finally {
System.clearProperty("solr.lock.type");
System.clearProperty("test.build.data");
System.clearProperty("test.cache.data");
System.clearProperty("solr.hdfs.home");
System.clearProperty("solr.hdfs.blockcache.global");

System.clearProperty("socketTimeout");

System.clearProperty("tests.hdfs.numdatanodes");

System.clearProperty("solr.lock.type");

// Clear "solr.hdfs." system properties
Enumeration<?> propertyNames = System.getProperties().propertyNames();
while(propertyNames.hasMoreElements()) {
String propertyName = String.valueOf(propertyNames.nextElement());
if(propertyName.startsWith("solr.hdfs.")) {
System.clearProperty(propertyName);
}
}
}
}

public static String getDataDir(MiniDFSCluster dfsCluster, String dataDir)
throws IOException {
public static String getDataDir(MiniDFSCluster dfsCluster, String dataDir) {
if (dataDir == null) {
return null;
}

@@ -250,7 +270,7 @@ public class HdfsTestUtil {
return "hdfs://" + logicalName;
} else {
URI uri = dfsCluster.getURI(0);
return uri.toString() ;
return uri.toString();
}
}
@ -25,9 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.util.BadHdfsThreadsFilter;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -37,7 +35,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
|
|||
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
|
||||
})
|
||||
public class HdfsThreadLeakTest extends SolrTestCaseJ4 {
|
||||
|
||||
private static MiniDFSCluster dfsCluster;
|
||||
|
||||
@BeforeClass
|
||||
|
@ -47,38 +44,26 @@ public class HdfsThreadLeakTest extends SolrTestCaseJ4 {
|
|||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
dfsCluster = null;
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
super.tearDown();
|
||||
try {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasic() throws IOException {
|
||||
String uri = HdfsTestUtil.getURI(dfsCluster);
|
||||
Path path = new Path(uri);
|
||||
Configuration conf = new Configuration();
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
FileSystem fs = FileSystem.get(path.toUri(), conf);
|
||||
Path testFile = new Path(uri.toString() + "/testfile");
|
||||
FSDataOutputStream out = fs.create(testFile);
|
||||
|
||||
out.write(5);
|
||||
out.hflush();
|
||||
out.close();
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
try(FileSystem fs = FileSystem.get(path.toUri(), conf)) {
|
||||
Path testFile = new Path(uri + "/testfile");
|
||||
try(FSDataOutputStream out = fs.create(testFile)) {
|
||||
out.write(5);
|
||||
out.hflush();
|
||||
}
|
||||
|
||||
((DistributedFileSystem) fs).recoverLease(testFile);
|
||||
|
||||
|
||||
fs.close();
|
||||
((DistributedFileSystem) fs).recoverLease(testFile);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
*/
|
||||
package org.apache.solr.cloud.hdfs;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.cloud.TlogReplayBufferedWhileIndexingTest;
|
||||
|
@ -42,7 +40,6 @@ public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedW
|
|||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
|
||||
}
|
||||
|
||||
|
@ -52,12 +49,11 @@ public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedW
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getDataDir(String dataDir) throws IOException {
|
||||
protected String getDataDir(String dataDir) {
|
||||
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
*/
|
||||
package org.apache.solr.cloud.hdfs;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.cloud.UnloadDistributedZkTest;
|
||||
|
@ -51,7 +49,7 @@ public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected String getDataDir(String dataDir) throws IOException {
|
||||
protected String getDataDir(String dataDir) {
|
||||
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -141,10 +141,9 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
|
|||
Directory dir = factory.get(core.getDataDir(), null, null);
|
||||
try {
|
||||
long dataDirSize = factory.size(dir);
|
||||
FileSystem fileSystem = null;
|
||||
|
||||
fileSystem = FileSystem.newInstance(
|
||||
new Path(core.getDataDir()).toUri(), new Configuration());
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
FileSystem fileSystem = FileSystem.newInstance(
|
||||
new Path(core.getDataDir()).toUri(), conf);
|
||||
long size = fileSystem.getContentSummary(
|
||||
new Path(core.getDataDir())).getLength();
|
||||
assertEquals(size, dataDirSize);
|
||||
|
|
|
@ -229,7 +229,6 @@ public class StressHdfsTest extends BasicDistributedZkTest {
|
|||
// check that all dirs are gone
|
||||
for (String dataDir : dataDirs) {
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
try(FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf)) {
|
||||
assertFalse(
|
||||
"Data directory exists after collection removal : " + dataDir,
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
*/
|
||||
package org.apache.solr.core;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.Date;
|
||||
|
@ -24,7 +26,6 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
@ -55,7 +56,6 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
|
|||
|
||||
@BeforeClass
|
||||
public static void setupClass() throws Exception {
|
||||
System.setProperty("solr.hdfs.blockcache.blocksperbank", "1024");
|
||||
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false);
|
||||
}
|
||||
|
||||
|
@ -65,41 +65,42 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
|
|||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);
|
||||
System.clearProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY);
|
||||
System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);
|
||||
System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
|
||||
System.clearProperty("solr.hdfs.blockcache.blocksperbank");
|
||||
System.clearProperty(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInitArgsOrSysPropConfig() throws Exception {
|
||||
try(HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
|
||||
|
||||
// test sys prop config
|
||||
System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
hdfsFactory.init(new NamedList<>());
|
||||
String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
|
||||
|
||||
assertTrue(dataHome.endsWith("/solr1/mock/data"));
|
||||
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);
|
||||
|
||||
// test init args config
|
||||
NamedList<Object> nl = new NamedList<>();
|
||||
nl.add("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr2");
|
||||
nl.add(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr2");
|
||||
hdfsFactory.init(nl);
|
||||
dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
|
||||
|
||||
assertTrue(dataHome.endsWith("/solr2/mock/data"));
|
||||
|
||||
// test sys prop and init args config - init args wins
|
||||
System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
hdfsFactory.init(nl);
|
||||
dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
|
||||
|
||||
assertTrue(dataHome.endsWith("/solr2/mock/data"));
|
||||
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);
|
||||
|
||||
// set conf dir by sys prop
|
||||
Path confDir = createTempDir();
|
||||
|
@ -141,44 +142,49 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
|
|||
@Test
|
||||
public void testCleanupOldIndexDirectories() throws Exception {
|
||||
try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
|
||||
System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1");
|
||||
hdfsFactory.init(new NamedList<>());
|
||||
String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
|
||||
assertTrue(dataHome.endsWith("/solr1/mock/data"));
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);
|
||||
|
||||
FileSystem hdfs = dfsCluster.getFileSystem();
|
||||
try(FileSystem hdfs = FileSystem.get(HdfsTestUtil.getClientConfiguration(dfsCluster))) {
|
||||
org.apache.hadoop.fs.Path dataHomePath = new org.apache.hadoop.fs.Path(dataHome);
|
||||
org.apache.hadoop.fs.Path currentIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index");
|
||||
assertFalse(checkHdfsDirectory(hdfs,currentIndexDirPath));
|
||||
hdfs.mkdirs(currentIndexDirPath);
|
||||
assertTrue(checkHdfsDirectory(hdfs, currentIndexDirPath));
|
||||
|
||||
org.apache.hadoop.fs.Path dataHomePath = new org.apache.hadoop.fs.Path(dataHome);
|
||||
org.apache.hadoop.fs.Path currentIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index");
|
||||
assertTrue(!hdfs.isDirectory(currentIndexDirPath));
|
||||
hdfs.mkdirs(currentIndexDirPath);
|
||||
assertTrue(hdfs.isDirectory(currentIndexDirPath));
|
||||
String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
|
||||
org.apache.hadoop.fs.Path oldIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index." + timestamp1);
|
||||
assertFalse(checkHdfsDirectory(hdfs,oldIndexDirPath));
|
||||
hdfs.mkdirs(oldIndexDirPath);
|
||||
assertTrue(checkHdfsDirectory(hdfs, oldIndexDirPath));
|
||||
|
||||
String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
|
||||
org.apache.hadoop.fs.Path oldIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index." + timestamp1);
|
||||
assertTrue(!hdfs.isDirectory(oldIndexDirPath));
|
||||
hdfs.mkdirs(oldIndexDirPath);
|
||||
assertTrue(hdfs.isDirectory(oldIndexDirPath));
|
||||
hdfsFactory.cleanupOldIndexDirectories(dataHomePath.toString(), currentIndexDirPath.toString(), false);
|
||||
|
||||
hdfsFactory.cleanupOldIndexDirectories(dataHomePath.toString(), currentIndexDirPath.toString(), false);
|
||||
assertTrue(checkHdfsDirectory(hdfs, currentIndexDirPath));
|
||||
assertFalse(checkHdfsDirectory(hdfs, oldIndexDirPath));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assertTrue(hdfs.isDirectory(currentIndexDirPath));
|
||||
assertTrue(!hdfs.isDirectory(oldIndexDirPath));
|
||||
private boolean checkHdfsDirectory(FileSystem hdfs, org.apache.hadoop.fs.Path path) throws IOException {
|
||||
try {
|
||||
return hdfs.getFileStatus(path).isDirectory();
|
||||
} catch (FileNotFoundException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLocalityReporter() throws Exception {
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.set("dfs.permissions.enabled", "false");
|
||||
|
||||
Random r = random();
|
||||
try(HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) {
|
||||
SolrMetricManager metricManager = new SolrMetricManager();
|
||||
String registry = TestUtil.randomSimpleString(r, 2, 10);
|
||||
String scope = TestUtil.randomSimpleString(r, 2, 10);
|
||||
Map<String, String> props = new HashMap<String, String>();
|
||||
Map<String, String> props = new HashMap<>();
|
||||
props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
|
||||
props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
|
||||
props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
|
||||
|
@ -190,7 +196,7 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
|
|||
MetricsMap metrics = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
|
||||
// We haven't done anything, so there should be no data
|
||||
Map<String, Object> statistics = metrics.getValue();
|
||||
assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l,
|
||||
assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0L,
|
||||
statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
|
||||
assertEquals(
|
||||
"Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0,
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
|
|||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
|
@ -109,7 +108,6 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
|
|||
try {
|
||||
URI uri = new URI(hdfsUri);
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
fs = FileSystem.get(uri, conf);
|
||||
|
||||
if (fs instanceof DistributedFileSystem) {
|
||||
|
@ -147,13 +145,17 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
|
|||
IOUtils.closeQuietly(fs);
|
||||
fs = null;
|
||||
try {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
SolrTestCaseJ4.resetFactory();
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty("solr.hdfs.default.backup.path");
|
||||
System.clearProperty("test.build.data");
|
||||
System.clearProperty("test.cache.data");
|
||||
try {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
System.clearProperty("solr.hdfs.home");
|
||||
System.clearProperty("solr.hdfs.default.backup.path");
|
||||
System.clearProperty("test.build.data");
|
||||
System.clearProperty("test.cache.data");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -180,12 +182,12 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
|
|||
boolean testViaReplicationHandler = random().nextBoolean();
|
||||
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
|
||||
|
||||
try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
|
||||
try (HttpSolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
|
||||
// Create a backup.
|
||||
if (testViaReplicationHandler) {
|
||||
log.info("Running Backup via replication handler");
|
||||
BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
|
||||
CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
|
||||
CheckBackupStatus checkBackupStatus = new CheckBackupStatus(masterClient, coreName, null);
|
||||
while (!checkBackupStatus.success) {
|
||||
checkBackupStatus.fetchStatus();
|
||||
Thread.sleep(1000);
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.solr.index.hdfs;
|
|||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.lucene.index.BaseTestCheckIndex;
|
||||
|
@ -66,8 +67,11 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
@AfterClass
|
||||
public static void teardownClass() throws Exception {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
dfsCluster = null;
|
||||
try {
|
||||
HdfsTestUtil.teardownClass(dfsCluster);
|
||||
} finally {
|
||||
dfsCluster = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -76,17 +80,21 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
|
|||
super.setUp();
|
||||
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
|
||||
directory = new HdfsDirectory(path, conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
directory.close();
|
||||
dfsCluster.getFileSystem().delete(path, true);
|
||||
super.tearDown();
|
||||
try {
|
||||
directory.close();
|
||||
} finally {
|
||||
try(FileSystem fs = FileSystem.get(HdfsTestUtil.getClientConfiguration(dfsCluster))) {
|
||||
fs.delete(path, true);
|
||||
} finally {
|
||||
super.tearDown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -108,7 +116,7 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
|
|||
SolrClient client = clients.get(0);
|
||||
NamedList<Object> response = client.query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
|
||||
NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
|
||||
String indexDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data") + "/index";
|
||||
String indexDir = ((NamedList<Object>) coreInfo.get("directory")).get("data") + "/index";
|
||||
|
||||
args = new String[] {indexDir};
|
||||
}
|
||||
|
|
|
@ -82,7 +82,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
try {
|
||||
URI uri = new URI(hdfsUri);
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
fs = FileSystem.get(uri, conf);
|
||||
} catch (IOException | URISyntaxException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
@ -257,7 +256,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
ulog.bufferUpdates();
|
||||
assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
|
||||
Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
|
||||
assertTrue(rinfoFuture == null);
|
||||
assertNull(rinfoFuture);
|
||||
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
|
||||
|
||||
ulog.bufferUpdates();
|
||||
|
@ -295,7 +294,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
|
||||
|
||||
rinfoFuture = ulog.applyBufferedUpdates();
|
||||
assertTrue(rinfoFuture != null);
|
||||
assertNotNull(rinfoFuture);
|
||||
|
||||
assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
|
||||
|
||||
|
@ -341,7 +340,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
|
||||
logReplay.drainPermits();
|
||||
rinfoFuture = ulog.applyBufferedUpdates();
|
||||
assertTrue(rinfoFuture != null);
|
||||
assertNotNull(rinfoFuture);
|
||||
assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
|
||||
|
||||
// apply a single update
|
||||
|
@ -409,7 +408,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
ulog.bufferUpdates();
|
||||
assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
|
||||
Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
|
||||
assertTrue(rinfoFuture == null);
|
||||
assertNull(rinfoFuture);
|
||||
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
|
||||
|
||||
ulog.bufferUpdates();
|
||||
|
@ -737,7 +736,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
//
|
||||
addDocs(1, start, new LinkedList<Long>()); // don't add this to the versions list because we are going to lose it...
|
||||
h.close();
|
||||
files = HdfsUpdateLog.getLogList(fs, new Path(logDir));;
|
||||
files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
|
||||
Arrays.sort(files);
|
||||
|
||||
FSDataOutputStream dos = fs.create(new Path(new Path(logDir), files[files.length-1]), (short)1);
|
||||
|
@ -850,7 +849,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
Arrays.sort(files);
|
||||
|
||||
FSDataOutputStream dos = fs.create(new Path(logDir, files[files.length-1]), (short)1);
|
||||
dos.write(new byte[(int)800]); // zero out file
|
||||
dos.write(new byte[800]); // zero out file
|
||||
dos.close();
|
||||
|
||||
|
||||
|
@ -920,34 +919,31 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
Arrays.sort(files);
|
||||
String fname = files[files.length-1];
|
||||
|
||||
FSDataOutputStream dos = fs.append(new Path(logDir, files[files.length-1]));
|
||||
dos.writeLong(0xffffffffffffffffL);
|
||||
dos.writeChars("This should be appended to a good log file, representing a bad partially written record.");
|
||||
dos.close();
|
||||
|
||||
FSDataInputStream dis = fs.open(new Path(logDir, files[files.length-1]));
|
||||
byte[] content = new byte[(int)dis.available()];
|
||||
try(FSDataOutputStream dos = fs.append(new Path(logDir, files[files.length-1]))) {
|
||||
dos.writeLong(0xffffffffffffffffL);
|
||||
dos.writeChars("This should be appended to a good log file, representing a bad partially written record.");
|
||||
}
|
||||
|
||||
dis.readFully(content);
|
||||
try(FSDataInputStream dis = fs.open(new Path(logDir, files[files.length-1]))) {
|
||||
byte[] content = new byte[dis.available()];
|
||||
dis.readFully(content);
|
||||
|
||||
dis.close();
|
||||
// Now make a newer log file with just the IDs changed. NOTE: this may not work if log format changes too much!
|
||||
findReplace("AAAAAA".getBytes(StandardCharsets.UTF_8), "aaaaaa".getBytes(StandardCharsets.UTF_8), content);
|
||||
findReplace("BBBBBB".getBytes(StandardCharsets.UTF_8), "bbbbbb".getBytes(StandardCharsets.UTF_8), content);
|
||||
findReplace("CCCCCC".getBytes(StandardCharsets.UTF_8), "cccccc".getBytes(StandardCharsets.UTF_8), content);
|
||||
|
||||
// Now make a newer log file with just the IDs changed. NOTE: this may not work if log format changes too much!
|
||||
findReplace("AAAAAA".getBytes(StandardCharsets.UTF_8), "aaaaaa".getBytes(StandardCharsets.UTF_8), content);
|
||||
findReplace("BBBBBB".getBytes(StandardCharsets.UTF_8), "bbbbbb".getBytes(StandardCharsets.UTF_8), content);
|
||||
findReplace("CCCCCC".getBytes(StandardCharsets.UTF_8), "cccccc".getBytes(StandardCharsets.UTF_8), content);
|
||||
// WARNING... assumes format of .00000n where n is less than 9
|
||||
long logNumber = Long.parseLong(fname.substring(fname.lastIndexOf(".") + 1));
|
||||
String fname2 = String.format(Locale.ROOT,
|
||||
UpdateLog.LOG_FILENAME_PATTERN,
|
||||
UpdateLog.TLOG_NAME,
|
||||
logNumber + 1);
|
||||
|
||||
// WARNING... assumes format of .00000n where n is less than 9
|
||||
long logNumber = Long.parseLong(fname.substring(fname.lastIndexOf(".") + 1));
|
||||
String fname2 = String.format(Locale.ROOT,
|
||||
UpdateLog.LOG_FILENAME_PATTERN,
|
||||
UpdateLog.TLOG_NAME,
|
||||
logNumber + 1);
|
||||
|
||||
dos = fs.create(new Path(logDir, fname2), (short)1);
|
||||
dos.write(content);
|
||||
dos.close();
|
||||
|
||||
try(FSDataOutputStream dos = fs.create(new Path(logDir, fname2), (short)1)) {
|
||||
dos.write(content);
|
||||
}
|
||||
}
|
||||
|
||||
logReplay.release(1000);
|
||||
logReplayFinish.drainPermits();
|
||||
|
@ -971,9 +967,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
|
|||
for(;;) {
|
||||
idx = indexOf(from, data, idx + from.length); // skip over previous match
|
||||
if (idx < 0) break;
|
||||
for (int i=0; i<to.length; i++) {
|
||||
data[idx+i] = to[i];
|
||||
}
|
||||
System.arraycopy(to, 0, data, idx, to.length);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -50,14 +50,11 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
|
|||
try {
|
||||
URI uri = new URI(hdfsUri);
|
||||
Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
|
||||
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
|
||||
fs = FileSystem.get(uri, conf);
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
} catch (URISyntaxException e) {
|
||||
} catch (IOException | URISyntaxException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
|
||||
System.setProperty("solr.ulog.dir", hdfsUri + "/solr/shard1");
|
||||
|
||||
initCore("solrconfig-tlog.xml","schema15.xml");