mirror of https://github.com/apache/druid.git
Increase timeout of GroupByQueryMergeBufferTest and AppenderatorDriverTest (#4441)
This commit is contained in:
parent 3a5c480405
commit 3e60c9125d
GroupByQueryMergeBufferTest.java:

@@ -59,6 +59,7 @@ import static org.junit.Assert.assertEquals;
 @RunWith(Parameterized.class)
 public class GroupByQueryMergeBufferTest
 {
+  private static final long TIMEOUT = 5000;
   private static class TestBlockingPool extends BlockingPool<ByteBuffer>
   {
     private int minRemainBufferNum;
@@ -230,7 +231,7 @@ public class GroupByQueryMergeBufferTest
         .setGranularity(Granularities.ALL)
         .setInterval(QueryRunnerTestHelper.firstToThird)
         .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
-        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
+        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
         .build();

     GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
@@ -258,7 +259,7 @@ public class GroupByQueryMergeBufferTest
         .setGranularity(Granularities.ALL)
         .setInterval(QueryRunnerTestHelper.firstToThird)
         .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
-        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
+        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
         .build();

     GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
@@ -297,7 +298,7 @@ public class GroupByQueryMergeBufferTest
         .setGranularity(Granularities.ALL)
         .setInterval(QueryRunnerTestHelper.firstToThird)
         .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
-        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
+        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
         .build();

     GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
@@ -349,7 +350,7 @@ public class GroupByQueryMergeBufferTest
         .setGranularity(Granularities.ALL)
         .setInterval(QueryRunnerTestHelper.firstToThird)
         .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
-        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
+        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
         .build();

     GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
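
The GroupByQueryMergeBufferTest hunks all make the same substitution: the inline 500 ms query timeout is replaced by the new TIMEOUT constant (5000 ms), so the merge-buffer tests are less likely to fail spuriously on slow machines. Below is a minimal sketch of that pattern, not the full test; the builder calls are the ones visible in the hunks above, while the dataSource setter, the factory and runner fields, and the imports are assumed from the surrounding test and elided or marked as such.

    // Per-query timeout supplied through the query context under QueryContexts.TIMEOUT_KEY.
    // TIMEOUT replaces the inline 500 that each test query used before this commit.
    private static final long TIMEOUT = 5000;

    final GroupByQuery query = GroupByQuery
        .builder()
        .setDataSource(QueryRunnerTestHelper.dataSource)   // assumed from the test setup; not shown in the hunks
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.firstToThird)
        .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))   // 5000 ms instead of 500 ms
        .build();

    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);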

AppenderatorDriverTest.java:

@@ -71,7 +71,7 @@ public class AppenderatorDriverTest
   private static final ObjectMapper OBJECT_MAPPER = new DefaultObjectMapper();
   private static final int MAX_ROWS_IN_MEMORY = 100;
   private static final int MAX_ROWS_PER_SEGMENT = 3;
-  private static final long PUBLISH_TIMEOUT = 5000;
+  private static final long PUBLISH_TIMEOUT = 10000;
   private static final long HANDOFF_CONDITION_TIMEOUT = 1000;

   private static final List<InputRow> ROWS = Arrays.<InputRow>asList(
@@ -221,13 +221,12 @@ public class AppenderatorDriverTest
     committerSupplier.setMetadata(1);
     Assert.assertTrue(driver.add(ROWS.get(0), "dummy", committerSupplier).isOk());

-    final SegmentsAndMetadata published = driver.publish(
+    final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff(
         makeOkPublisher(),
         committerSupplier.get(),
         ImmutableList.of("dummy")
     ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS);
-    final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
-                                                          .get(HANDOFF_CONDITION_TIMEOUT, TimeUnit.MILLISECONDS);

     Assert.assertEquals(
         ImmutableSet.of(
             new SegmentIdentifier(DATA_SOURCE, new Interval("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0))
@@ -243,13 +242,12 @@ public class AppenderatorDriverTest
     committerSupplier.setMetadata(i + 1);
     Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk());

-    final SegmentsAndMetadata published = driver.publish(
+    final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff(
         makeOkPublisher(),
         committerSupplier.get(),
         ImmutableList.of("dummy")
     ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS);
-    final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
-                                                          .get(HANDOFF_CONDITION_TIMEOUT, TimeUnit.MILLISECONDS);

     Assert.assertEquals(
         ImmutableSet.of(
             // The second and third rows have the same dataSource, interval, and version, but different shardSpec of
@@ -265,13 +263,11 @@ public class AppenderatorDriverTest
     driver.persist(committerSupplier.get());

     // There is no remaining rows in the driver, and thus the result must be empty
-    final SegmentsAndMetadata published = driver.publish(
+    final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff(
         makeOkPublisher(),
         committerSupplier.get(),
         ImmutableList.of("dummy")
     ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS);
-    final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
-                                                          .get(HANDOFF_CONDITION_TIMEOUT, TimeUnit.MILLISECONDS);

     Assert.assertEquals(
         ImmutableSet.of(),
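
The AppenderatorDriverTest hunks combine two adjustments: PUBLISH_TIMEOUT is raised from 5000 ms to 10000 ms, and each test now waits on a single driver.publishAndRegisterHandoff(...) future under that one timeout instead of chaining a publish(...) wait with a separate registerHandoff(...) wait bounded by HANDOFF_CONDITION_TIMEOUT. Below is a minimal sketch of the resulting call site; driver, committerSupplier, and makeOkPublisher() come from the test's own setup, and the assertions that follow are elided.

    // Publish the pending segments and register them for handoff in one call,
    // waiting at most PUBLISH_TIMEOUT (10000 ms after this commit) for the result.
    final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff(
        makeOkPublisher(),          // test publisher that reports success
        committerSupplier.get(),    // commit metadata captured by the test's committer supplier
        ImmutableList.of("dummy")   // the sequence name used throughout the test
    ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS);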