SOLR-12489: Fix test failures

This commit is contained in:
Varun Thacker 2018-07-21 13:05:07 -07:00
parent 1bea1da5dc
commit 18c29b7303
3 changed files with 9 additions and 13 deletions

View File

@ -58,6 +58,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
protected static final int NUM_SPLIT_SHARDS = 3; //We always split shard1 so total shards post split will be 3
int replFactor; int replFactor;
int numTlogReplicas; int numTlogReplicas;
@ -102,12 +103,12 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
if (random().nextBoolean()) { if (random().nextBoolean()) {
create.setMaxShardsPerNode(-1); create.setMaxShardsPerNode(-1);
} else if (doSplitShardOperation) {
create.setMaxShardsPerNode((int) Math.ceil(NUM_SPLIT_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));
} else if (NUM_SHARDS * (backupReplFactor) > cluster.getJettySolrRunners().size() || random().nextBoolean()) { } else if (NUM_SHARDS * (backupReplFactor) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));//just to assert it survives the restoration create.setMaxShardsPerNode((int) Math.ceil(NUM_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
if (doSplitShardOperation) {
create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
}
} }
if (random().nextBoolean()) { if (random().nextBoolean()) {
create.setAutoAddReplicas(true);//just to assert it survives the restoration create.setAutoAddReplicas(true);//just to assert it survives the restoration
} }
@ -293,12 +294,9 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
} }
if (rarely()) { // Try with createNodeSet configuration if (rarely()) { // Try with createNodeSet configuration
int nodeSetSize = cluster.getJettySolrRunners().size() / 2; List<String> nodeStrs = new ArrayList<>(1);//Always 1 as cluster.getJettySolrRunners().size()=NUM_SHARDS=2
List<String> nodeStrs = new ArrayList<>(nodeSetSize);
Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator(); Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
for (int i = 0; i < nodeSetSize ; i++) { nodeStrs.add(iter.next().getNodeName());
nodeStrs.add(iter.next().getNodeName());
}
restore.setCreateNodeSet(String.join(",", nodeStrs)); restore.setCreateNodeSet(String.join(",", nodeStrs));
restore.setCreateNodeSetShuffle(usually()); restore.setCreateNodeSetShuffle(usually());
// we need to double maxShardsPerNode value since we reduced number of available nodes by half. // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
@ -307,8 +305,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
computeRestoreMaxShardsPerNode = restore.getMaxShardsPerNode() * 2; computeRestoreMaxShardsPerNode = restore.getMaxShardsPerNode() * 2;
restore.setMaxShardsPerNode(computeRestoreMaxShardsPerNode); restore.setMaxShardsPerNode(computeRestoreMaxShardsPerNode);
} else { } else {
computeRestoreMaxShardsPerNode = origShardToDocCount.size() * 2; computeRestoreMaxShardsPerNode = origShardToDocCount.size() * backupReplFactor;
restore.setMaxShardsPerNode(origShardToDocCount.size() * 2); restore.setMaxShardsPerNode(origShardToDocCount.size() * backupReplFactor);
} }
} }

View File

@ -62,7 +62,6 @@ import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
}) })
//05-Jul-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018 //05-Jul-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase { public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
public static final String SOLR_XML = "<solr>\n" + public static final String SOLR_XML = "<solr>\n" +
"\n" + "\n" +

View File

@ -24,7 +24,6 @@ import org.junit.BeforeClass;
* Note that the Solr backup/restore still requires a "shared" file-system. It's just that in this case * Note that the Solr backup/restore still requires a "shared" file-system. It's just that in this case
* such file-system would be exposed via local file-system API. * such file-system would be exposed via local file-system API.
*/ */
@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase { public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
private static String backupLocation; private static String backupLocation;