From 18c29b7303a96faeb125dee8a29b210539d23194 Mon Sep 17 00:00:00 2001
From: Varun Thacker
Date: Sat, 21 Jul 2018 13:05:07 -0700
Subject: [PATCH] SOLR-12489: Fix test failures

---
 .../AbstractCloudBackupRestoreTestCase.java   | 20 +++++++++-----------
 .../TestHdfsCloudBackupRestore.java           |  1 -
 .../TestLocalFSCloudBackupRestore.java        |  1 -
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
index cdfd2241c25..a78be0304e1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -58,6 +58,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
+  protected static final int NUM_SPLIT_SHARDS = 3; //We always split shard1 so total shards post split will be 3
 
   int replFactor;
   int numTlogReplicas;
@@ -102,12 +103,12 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
 
     if (random().nextBoolean()) {
       create.setMaxShardsPerNode(-1);
+    } else if (doSplitShardOperation) {
+      create.setMaxShardsPerNode((int) Math.ceil(NUM_SPLIT_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));
     } else if (NUM_SHARDS * (backupReplFactor) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
-      create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
-      if (doSplitShardOperation) {
-        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
-      }
+      create.setMaxShardsPerNode((int) Math.ceil(NUM_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
     }
+
     if (random().nextBoolean()) {
       create.setAutoAddReplicas(true);//just to assert it survives the restoration
     }
@@ -293,12 +294,9 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
     }
 
     if (rarely()) { // Try with createNodeSet configuration
-      int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
-      List<String> nodeStrs = new ArrayList<>(nodeSetSize);
+      List<String> nodeStrs = new ArrayList<>(1);//Always 1 as cluster.getJettySolrRunners().size()=NUM_SHARDS=2
       Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
-      for (int i = 0; i < nodeSetSize ; i++) {
-        nodeStrs.add(iter.next().getNodeName());
-      }
+      nodeStrs.add(iter.next().getNodeName());
       restore.setCreateNodeSet(String.join(",", nodeStrs));
       restore.setCreateNodeSetShuffle(usually());
       // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
@@ -307,8 +305,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
         computeRestoreMaxShardsPerNode = restore.getMaxShardsPerNode() * 2;
         restore.setMaxShardsPerNode(computeRestoreMaxShardsPerNode);
       } else {
-        computeRestoreMaxShardsPerNode = origShardToDocCount.size() * 2;
-        restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        computeRestoreMaxShardsPerNode = origShardToDocCount.size() * backupReplFactor;
+        restore.setMaxShardsPerNode(origShardToDocCount.size() * backupReplFactor);
       }
     }
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
index 5bfed0abf15..d656f5230ab 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -62,7 +62,6 @@ import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 //05-Jul-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
 public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
   public static final String SOLR_XML = "<solr>\n" +
       "\n" +
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
index f17d7d147a1..514b9b9e482 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
@@ -24,7 +24,6 @@ import org.junit.BeforeClass;
  * Note that the Solr backup/restore still requires a "shared" file-system. Its just that in this case
  * such file-system would be exposed via local file-system API.
  */
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
 public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
   private static String backupLocation;
 
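
Note (illustrative, not part of the patch): every maxShardsPerNode value the test now
sets is the ceiling of (shards * replication factor) / (number of nodes), with
NUM_SPLIT_SHARDS = 3 standing in for the shard count once shard1 has been split. A
minimal, self-contained sketch of that arithmetic follows; the class and method names
are hypothetical, not from the Solr source.

    // MaxShardsPerNodeMath.java -- illustrative sketch only
    public class MaxShardsPerNodeMath {
      // Smallest per-node shard limit that fits all replicas on the cluster,
      // mirroring the (int) Math.ceil(...) expressions in the patch above.
      static int maxShardsPerNode(int numShards, int replFactor, int numNodes) {
        return (int) Math.ceil(numShards * replFactor / (double) numNodes);
      }

      public static void main(String[] args) {
        // Post-split case: NUM_SPLIT_SHARDS = 3 shards, 2 replicas each, 2 Jetty nodes
        System.out.println(maxShardsPerNode(3, 2, 2)); // ceil(6 / 2.0) = 3
        // No-split case: NUM_SHARDS = 2 shards, 1 replica each, 2 Jetty nodes
        System.out.println(maxShardsPerNode(2, 1, 2)); // ceil(2 / 2.0) = 1
      }
    }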