mirror of https://github.com/apache/druid.git
Fix warn msg and add some unit tests
This commit is contained in:
parent 95e0429eb1
commit 01448d264c
@@ -0,0 +1,180 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.indexer;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.metamx.common.Granularity;
import io.druid.data.input.impl.DelimitedParseSpec;
import io.druid.data.input.impl.DimensionsSpec;
import io.druid.data.input.impl.StringInputRowParser;
import io.druid.data.input.impl.TimestampSpec;
import io.druid.granularity.QueryGranularity;
import io.druid.indexer.partitions.HashedPartitionsSpec;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.segment.indexing.DataSchema;
import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;

@RunWith(Parameterized.class)
public class DetermineHashedPartitionsJobTest
{
  private HadoopDruidIndexerConfig indexerConfig;
  private int expectedNumTimeBuckets;
  private int[] expectedNumOfShards;
  private int errorMargin;

  @Parameterized.Parameters(name = "File={0}, TargetPartitionSize={1}, Interval={2}, ErrorMargin={3}, NumTimeBuckets={4}, NumShards={5}")
  public static Collection<?> data()
  {
    // Expected shard counts per daily time bucket. With targetPartitionSize = 1
    // each estimated unique row gets its own shard (13 per day); with
    // targetPartitionSize = 100 a single shard per day suffices. The 12 and 11
    // in "third" reflect the cardinality estimator's output for those days.
    int[] first = new int[1];
    Arrays.fill(first, 13);
    int[] second = new int[6];
    Arrays.fill(second, 1);
    int[] third = new int[6];
    Arrays.fill(third, 13);
    third[2] = 12;
    third[5] = 11;

    return Arrays.asList(
        new Object[][]{
            {
                DetermineHashedPartitionsJobTest.class.getResource("/druid.test.data.with.duplicate.rows.tsv").getPath(),
                1L,
                "2011-04-10T00:00:00.000Z/2011-04-11T00:00:00.000Z",
                0,
                1,
                first
            },
            {
                DetermineHashedPartitionsJobTest.class.getResource("/druid.test.data.with.duplicate.rows.tsv").getPath(),
                100L,
                "2011-04-10T00:00:00.000Z/2011-04-16T00:00:00.000Z",
                0,
                6,
                second
            },
            {
                DetermineHashedPartitionsJobTest.class.getResource("/druid.test.data.with.duplicate.rows.tsv").getPath(),
                1L,
                "2011-04-10T00:00:00.000Z/2011-04-16T00:00:00.000Z",
                0,
                6,
                third
            }
        }
    );
  }

  public DetermineHashedPartitionsJobTest(String dataFilePath, long targetPartitionSize, String interval, int errorMargin, int expectedNumTimeBuckets, int[] expectedNumOfShards) throws IOException
  {
    this.expectedNumOfShards = expectedNumOfShards;
    this.expectedNumTimeBuckets = expectedNumTimeBuckets;
    this.errorMargin = errorMargin;
    File tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();

    HadoopIngestionSpec ingestionSpec = new HadoopIngestionSpec(
        new DataSchema(
            "test_schema",
            new StringInputRowParser(
                new DelimitedParseSpec(
                    new TimestampSpec("ts", null, null),
                    new DimensionsSpec(ImmutableList.of("market", "quality", "placement", "placementish"), null, null),
                    "\t",
                    null,
                    Arrays.asList("ts", "market", "quality", "placement", "placementish", "index")
                )
            ),
            new AggregatorFactory[]{new DoubleSumAggregatorFactory("index", "index")},
            new UniformGranularitySpec(
                Granularity.DAY,
                QueryGranularity.NONE,
                ImmutableList.of(new Interval(interval))
            )
        ),
        new HadoopIOConfig(
            ImmutableMap.<String, Object>of(
                "paths",
                dataFilePath,
                "type",
                "static"
            ), null, tmpDir.getAbsolutePath()
        ),
        new HadoopTuningConfig(
            tmpDir.getAbsolutePath(),
            null,
            new HashedPartitionsSpec(targetPartitionSize, null, true, null),
            null,
            null,
            null,
            false,
            false,
            false,
            false,
            null,
            false,
            false,
            false,
            null,
            null
        )
    );
    this.indexerConfig = new HadoopDruidIndexerConfig(ingestionSpec);
  }

  @Test
  public void testDetermineHashedPartitions()
  {
    DetermineHashedPartitionsJob determineHashedPartitionsJob = new DetermineHashedPartitionsJob(indexerConfig);
    determineHashedPartitionsJob.run();
    Map<DateTime, List<HadoopyShardSpec>> shardSpecs = indexerConfig.getSchema().getTuningConfig().getShardSpecs();
    Assert.assertEquals(
        expectedNumTimeBuckets,
        shardSpecs.entrySet().size()
    );
    int i = 0;
    for (Map.Entry<DateTime, List<HadoopyShardSpec>> entry : shardSpecs.entrySet()) {
      Assert.assertEquals(
          expectedNumOfShards[i++],
          entry.getValue().size(),
          errorMargin
      );
    }
  }
}
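The expected shard counts above encode a simple sizing rule; here is a hedged sketch of the arithmetic, assuming (as in Druid's DetermineHashedPartitionsJob) that each bucket's shard count is its estimated row cardinality divided by targetPartitionSize, rounded up. The HyperLogLog-based estimate can drift slightly from the true count, which would explain the 12 and 11 expected for two buckets in the third case:

public class ShardCountSketch
{
  public static void main(String[] args)
  {
    // ~13 unique rows per daily bucket: rows that repeat the timestamp and
    // dimensions (the "duplicate rows" in the data file) collapse to one.
    long estimatedCardinality = 13;
    long targetPartitionSize = 1;  // cases "first" and "third" above
    int numShards = (int) Math.ceil((double) estimatedCardinality / targetPartitionSize);
    System.out.println(numShards); // 13; with targetPartitionSize = 100 this is 1 ("second")
  }
}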
@@ -0,0 +1,82 @@
2011-04-10T00:00:00.000Z spot automotive preferred apreferred 113.221448
2011-04-10T00:00:00.000Z spot automotive preferred apreferred 11.221448
2011-04-10T00:00:00.000Z spot automotive preferred apreferred 103.221448
2011-04-10T00:00:00.000Z spot automotive preferred apreferred 53.221448
2011-04-10T00:00:00.000Z spot business preferred bpreferred 95.570457
2011-04-10T00:00:00.000Z spot entertainment preferred epreferred 131.766616
2011-04-10T00:00:00.000Z spot health preferred hpreferred 99.950855
2011-04-10T00:00:00.000Z spot mezzanine preferred mpreferred 91.470524
2011-04-10T00:00:00.000Z spot news preferred npreferred 99.393076
2011-04-10T00:00:00.000Z spot premium preferred ppreferred 123.207579
2011-04-10T00:00:00.000Z spot technology preferred tpreferred 84.898691
2011-04-10T00:00:00.000Z spot travel preferred tpreferred 114.353962
2011-04-10T00:00:00.000Z total_market mezzanine preferred mpreferred 1005.253077
2011-04-10T00:00:00.000Z total_market premium preferred ppreferred 1030.094757
2011-04-10T00:00:00.000Z upfront mezzanine preferred mpreferred 1031.741509
2011-04-10T00:00:00.000Z upfront premium preferred ppreferred 775.965555
2011-04-11T00:00:00.000Z spot automotive preferred apreferred 130.165796
2011-04-11T00:00:00.000Z spot business preferred bpreferred 107.765101
2011-04-11T00:00:00.000Z spot entertainment preferred epreferred 142.751726
2011-04-11T00:00:00.000Z spot health preferred hpreferred 104.847285
2011-04-11T00:00:00.000Z spot mezzanine preferred mpreferred 95.272956
2011-04-11T00:00:00.000Z spot news preferred npreferred 106.229286
2011-04-11T00:00:00.000Z spot premium preferred ppreferred 126.823859
2011-04-11T00:00:00.000Z spot technology preferred tpreferred 89.250155
2011-04-11T00:00:00.000Z spot travel preferred tpreferred 122.049678
2011-04-11T00:00:00.000Z total_market mezzanine preferred mpreferred 1112.794811
2011-04-11T00:00:00.000Z total_market premium preferred ppreferred 1113.357530
2011-04-11T00:00:00.000Z upfront mezzanine preferred mpreferred 1374.968412
2011-04-11T00:00:00.000Z upfront premium preferred ppreferred 853.163039
2011-04-12T00:00:00.000Z spot automotive preferred apreferred 122.386348
2011-04-12T00:00:00.000Z spot business preferred bpreferred 106.380995
2011-04-12T00:00:00.000Z spot entertainment preferred epreferred 141.932300
2011-04-12T00:00:00.000Z spot health preferred hpreferred 103.142372
2011-04-12T00:00:00.000Z spot mezzanine preferred mpreferred 97.340631
2011-04-12T00:00:00.000Z spot news preferred npreferred 105.381244
2011-04-12T00:00:00.000Z spot premium preferred ppreferred 125.189098
2011-04-12T00:00:00.000Z spot technology preferred tpreferred 90.533391
2011-04-12T00:00:00.000Z spot travel preferred tpreferred 122.128172
2011-04-12T00:00:00.000Z total_market mezzanine preferred mpreferred 1153.974725
2011-04-12T00:00:00.000Z total_market premium preferred ppreferred 1069.640880
2011-04-12T00:00:00.000Z upfront mezzanine preferred mpreferred 1456.611830
2011-04-12T00:00:00.000Z upfront premium preferred ppreferred 811.925240
2011-04-13T00:00:00.000Z spot automotive preferred apreferred 122.688340
2011-04-13T00:00:00.000Z spot business preferred bpreferred 105.739623
2011-04-13T00:00:00.000Z spot entertainment preferred epreferred 136.983407
2011-04-13T00:00:00.000Z spot health preferred hpreferred 100.860813
2011-04-13T00:00:00.000Z spot mezzanine preferred mpreferred 94.839191
2011-04-13T00:00:00.000Z spot news preferred npreferred 105.261296
2011-04-13T00:00:00.000Z spot premium preferred ppreferred 119.836611
2011-04-13T00:00:00.000Z spot technology preferred tpreferred 91.972558
2011-04-13T00:00:00.000Z spot travel preferred tpreferred 120.145572
2011-04-13T00:00:00.000Z total_market mezzanine preferred mpreferred 1016.137449
2011-04-13T00:00:00.000Z total_market premium preferred ppreferred 994.902292
2011-04-13T00:00:00.000Z upfront mezzanine preferred mpreferred 989.032799
2011-04-13T00:00:00.000Z upfront premium preferred ppreferred 744.744657
2011-04-14T00:00:00.000Z spot automotive preferred apreferred 111.179339
2011-04-14T00:00:00.000Z spot business preferred bpreferred 101.984377
2011-04-14T00:00:00.000Z spot entertainment preferred epreferred 133.606430
2011-04-14T00:00:00.000Z spot health preferred hpreferred 99.738319
2011-04-14T00:00:00.000Z spot mezzanine preferred mpreferred 91.270553
2011-04-14T00:00:00.000Z spot news preferred npreferred 101.251756
2011-04-14T00:00:00.000Z spot premium preferred ppreferred 118.285128
2011-04-14T00:00:00.000Z spot technology preferred tpreferred 84.951300
2011-04-14T00:00:00.000Z spot travel preferred tpreferred 119.768525
2011-04-14T00:00:00.000Z total_market mezzanine preferred mpreferred 1032.154263
2011-04-14T00:00:00.000Z total_market premium preferred ppreferred 999.586450
2011-04-14T00:00:00.000Z upfront mezzanine preferred mpreferred 1166.401205
2011-04-14T00:00:00.000Z upfront premium preferred ppreferred 753.104985
2011-04-14T00:00:00.000Z upfront premium preferred ppreferred 7.056987
2011-04-15T00:00:00.000Z spot automotive preferred apreferred 106.793700
2011-04-15T00:00:00.000Z spot business preferred bpreferred 94.469747
2011-04-15T00:00:00.000Z spot entertainment preferred epreferred 135.109191
2011-04-15T00:00:00.000Z spot health preferred hpreferred 99.596909
2011-04-15T00:00:00.000Z spot mezzanine preferred mpreferred 92.782760
2011-04-15T00:00:00.000Z spot news preferred npreferred 97.859766
2011-04-15T00:00:00.000Z spot premium preferred ppreferred 120.508160
2011-04-15T00:00:00.000Z spot technology preferred tpreferred 89.646236
2011-04-15T00:00:00.000Z spot travel preferred tpreferred 120.290348
2011-04-15T00:00:00.000Z total_market mezzanine preferred mpreferred 994.752744
2011-04-15T00:00:00.000Z total_market premium preferred ppreferred 1029.056992
2011-04-15T00:00:00.000Z upfront mezzanine preferred mpreferred 962.731172
2011-04-15T00:00:00.000Z upfront premium preferred ppreferred 780.271977
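The columns are tab-separated: ts, market, quality, placement, placementish, index, matching the DelimitedParseSpec in the test above. Note the four "automotive" rows on 2011-04-10 and the extra "upfront premium" row on 2011-04-14: these are the duplicate rows of the file name, identical in timestamp and dimensions and differing only in the index metric.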
@@ -131,4 +131,42 @@ public class DruidDataSource
      );
    }
  }

  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    DruidDataSource that = (DruidDataSource) o;

    if (name != null ? !name.equals(that.name) : that.name != null) {
      return false;
    }
    if (partitionNames != null ? !partitionNames.equals(that.partitionNames) : that.partitionNames != null) {
      return false;
    }
    if (properties != null ? !properties.equals(that.properties) : that.properties != null) {
      return false;
    }
    if (segmentsHolder != null ? !segmentsHolder.equals(that.segmentsHolder) : that.segmentsHolder != null) {
      return false;
    }

    return true;
  }

  @Override
  public int hashCode()
  {
    int result = name != null ? name.hashCode() : 0;
    result = 31 * result + (properties != null ? properties.hashCode() : 0);
    result = 31 * result + (partitionNames != null ? partitionNames.hashCode() : 0);
    result = 31 * result + (segmentsHolder != null ? segmentsHolder.hashCode() : 0);
    return result;
  }
}
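A minimal sketch (not part of this commit) of how the new contract could be exercised; it assumes the two-argument DruidDataSource(String, Map) constructor used in the tests later in this commit, and that freshly constructed instances start with empty internal state:

import java.util.HashMap;
import org.junit.Assert;
import org.junit.Test;

public class DruidDataSourceEqualsContractTest
{
  @Test
  public void testEqualsAndHashCodeAgree()
  {
    DruidDataSource a = new DruidDataSource("datasource1", new HashMap());
    DruidDataSource b = new DruidDataSource("datasource1", new HashMap());

    Assert.assertEquals(a, b);                        // same name, same (empty) state
    Assert.assertEquals(a.hashCode(), b.hashCode());  // equal objects must hash alike
    Assert.assertFalse(a.equals(new DruidDataSource("datasource2", new HashMap())));
  }
}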
@@ -741,17 +741,18 @@ public class RealtimePlumber implements Plumber
       );
       long minTimestamp = minTimestampAsDate.getMillis();
 
-      log.info("Found [%,d] sinks. minTimestamp [%s]", sinks.size(), minTimestampAsDate);
+      log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].", sinks.size(), minTimestampAsDate);
 
       List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
       for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
         final Long intervalStart = entry.getKey();
         if (intervalStart < minTimestamp) {
-          log.info("Adding entry[%s] for merge and push.", entry);
+          log.info("Adding entry [%s] for merge and push.", entry);
           sinksToPush.add(entry);
         } else {
-          log.warn(
-              "[%s] < [%s] Skipping persist and merge.",
+          log.info(
+              "Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.",
+              entry,
               new DateTime(intervalStart),
               minTimestampAsDate
           );
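This is the "warn msg" fix from the commit title: the old line rendered as "[intervalStart] < [minTimestampAsDate] Skipping persist and merge." even though this branch only runs when intervalStart >= minTimestamp, so it logged the inverse of the actual condition, at warn level. The replacement states the comparison correctly, downgrades it to info, and explains that the sink will be handed off in a later run.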
@@ -0,0 +1,87 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.segment.loading;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.google.common.primitives.Ints;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.NoneShardSpec;
import org.apache.commons.io.FileUtils;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;

public class LocalDataSegmentPusherTest
{
  DataSegment dataSegment;
  LocalDataSegmentPusher localDataSegmentPusher;
  File dataSegmentFiles;
  File outDir;

  @Before
  public void setUp() throws IOException
  {
    dataSegment = new DataSegment(
        "",
        new Interval(0, 1),
        "",
        null,
        null,
        null,
        new NoneShardSpec(),
        null,
        -1
    );
    localDataSegmentPusher = new LocalDataSegmentPusher(new LocalDataSegmentPusherConfig(), new ObjectMapper());
    dataSegmentFiles = Files.createTempDir();
    // version.bin is the only segment file needed here; push() zips whatever
    // is in the segment directory.
    ByteStreams.write(
        Ints.toByteArray(0x9),
        Files.newOutputStreamSupplier(new File(dataSegmentFiles, "version.bin"))
    );
  }

  @Test
  public void testPush() throws IOException
  {
    /*
     * dataSegment      - used to create the LoadSpec and outDir (the local deep storage location in this case)
     * dataSegmentFiles - the location of segment files like version.bin, meta.smoosh and xxxxx.smoosh
     */
    DataSegment returnSegment = localDataSegmentPusher.push(dataSegmentFiles, dataSegment);
    Assert.assertNotNull(returnSegment);
    Assert.assertEquals(dataSegment, returnSegment);
    outDir = new File(
        new LocalDataSegmentPusherConfig().getStorageDirectory(),
        DataSegmentPusherUtil.getStorageDir(returnSegment)
    );
    File indexZipFile = new File(outDir, "index.zip");
    File descriptorJson = new File(outDir, "descriptor.json");
    Assert.assertTrue(indexZipFile.exists());
    Assert.assertTrue(descriptorJson.exists());
  }

  @After
  public void tearDown() throws IOException
  {
    FileUtils.deleteDirectory(dataSegmentFiles);
    FileUtils.deleteDirectory(outDir);
  }
}
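As the assertions show, push() is expected to zip the segment directory into index.zip and write the serialized DataSegment as descriptor.json, under the configured storage directory in the per-segment path produced by DataSegmentPusherUtil.getStorageDir (conventionally dataSource/interval/version/partitionNumber, though that layout is not shown in this diff).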
@@ -0,0 +1,196 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.server.http;

import com.google.common.collect.ImmutableList;
import io.druid.client.DruidDataSource;
import io.druid.client.DruidServer;
import io.druid.client.InventoryView;
import io.druid.timeline.DataSegment;
import org.easymock.EasyMock;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;

import javax.ws.rs.core.Response;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class DatasourcesResourceTest
{
  @Test
  public void testGetFullQueryableDataSources() throws Exception
  {
    InventoryView inventoryView = EasyMock.createStrictMock(InventoryView.class);
    DruidServer server = EasyMock.createStrictMock(DruidServer.class);
    DruidDataSource[] druidDataSources = {
        new DruidDataSource("datasource1", new HashMap()),
        new DruidDataSource("datasource2", new HashMap())
    };
    EasyMock.expect(server.getDataSources()).andReturn(
        ImmutableList.of(druidDataSources[0], druidDataSources[1])
    ).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(
        ImmutableList.of(server)
    ).atLeastOnce();
    EasyMock.replay(inventoryView, server);

    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null);
    Response response = datasourcesResource.getQueryableDataSources("full", null);
    Set<DruidDataSource> result = (Set<DruidDataSource>) response.getEntity();
    DruidDataSource[] resultantDruidDataSources = new DruidDataSource[result.size()];
    result.toArray(resultantDruidDataSources);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(2, resultantDruidDataSources.length);
    Assert.assertArrayEquals(druidDataSources, resultantDruidDataSources);

    response = datasourcesResource.getQueryableDataSources(null, null);
    List<String> result1 = (List<String>) response.getEntity();
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(2, result1.size());
    Assert.assertTrue(result1.contains("datasource1"));
    Assert.assertTrue(result1.contains("datasource2"));
    EasyMock.verify(inventoryView, server);
  }

  @Test
  public void testGetSimpleQueryableDataSources() throws Exception
  {
    InventoryView inventoryView = EasyMock.createStrictMock(InventoryView.class);
    DruidServer server = EasyMock.createStrictMock(DruidServer.class);
    List<Map<String, Object>> input = new ArrayList<>(2);
    HashMap<String, Object> dataSourceProp1 = new HashMap<>();
    dataSourceProp1.put("name", "datasource1");
    dataSourceProp1.put("partitionName", "partition");
    dataSourceProp1.put(
        "datasegment",
        new DataSegment("datasource1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 0)
    );

    HashMap<String, Object> dataSourceProp2 = new HashMap<>();
    dataSourceProp2.put("name", "datasource2");
    dataSourceProp2.put("partitionName", "partition");
    dataSourceProp2.put(
        "datasegment",
        new DataSegment("datasource2", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 0)
    );
    input.add(dataSourceProp1);
    input.add(dataSourceProp2);

    List<DruidDataSource> listDataSources = new ArrayList<>();
    for (Map<String, Object> entry : input) {
      listDataSources.add(
          new DruidDataSource(entry.get("name").toString(), new HashMap())
              .addSegment(entry.get("partitionName").toString(), (DataSegment) entry.get("datasegment"))
      );
    }
    EasyMock.expect(server.getDataSources()).andReturn(
        listDataSources
    ).atLeastOnce();
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(
        listDataSources.get(0)
    ).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn(null).atLeastOnce();
    EasyMock.expect(server.getDataSource("datasource2")).andReturn(
        listDataSources.get(1)
    ).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn(null).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(
        ImmutableList.of(server)
    ).atLeastOnce();

    EasyMock.replay(inventoryView, server);
    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null);
    Response response = datasourcesResource.getQueryableDataSources(null, "simple");
    Assert.assertEquals(200, response.getStatus());
    List<Map<String, Object>> results = (List<Map<String, Object>>) response.getEntity();
    int index = 0;
    for (Map<String, Object> entry : results) {
      Assert.assertEquals(input.get(index).get("name"), entry.get("name").toString());
      Assert.assertTrue(((Map) ((Map) entry.get("properties")).get("tiers")).containsKey(null));
      Assert.assertNotNull(((Map) entry.get("properties")).get("segments"));
      Assert.assertEquals(1, ((Map) ((Map) entry.get("properties")).get("segments")).get("count"));
      index++;
    }
    EasyMock.verify(inventoryView, server);
  }

  @Test
  public void testFullGetTheDataSource() throws Exception
  {
    InventoryView inventoryView = EasyMock.createStrictMock(InventoryView.class);
    DruidServer server = EasyMock.createStrictMock(DruidServer.class);
    DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap());
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(
        dataSource1
    ).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(
        ImmutableList.of(server)
    ).atLeastOnce();

    EasyMock.replay(inventoryView, server);
    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null);
    Response response = datasourcesResource.getTheDataSource("datasource1", "full");
    DruidDataSource result = (DruidDataSource) response.getEntity();
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(dataSource1, result);
    EasyMock.verify(inventoryView, server);
  }

  @Test
  public void testNullGetTheDataSource() throws Exception
  {
    InventoryView inventoryView = EasyMock.createStrictMock(InventoryView.class);
    DruidServer server = EasyMock.createStrictMock(DruidServer.class);
    EasyMock.expect(server.getDataSource("none")).andReturn(null).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(
        ImmutableList.of(server)
    ).atLeastOnce();

    EasyMock.replay(inventoryView, server);
    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null);
    Assert.assertEquals(204, datasourcesResource.getTheDataSource("none", null).getStatus());
    EasyMock.verify(inventoryView, server);
  }

  @Test
  public void testSimpleGetTheDataSource() throws Exception
  {
    InventoryView inventoryView = EasyMock.createStrictMock(InventoryView.class);
    DruidServer server = EasyMock.createStrictMock(DruidServer.class);
    DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap());
    dataSource1.addSegment(
        "partition",
        new DataSegment("datasegment1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 0)
    );
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(
        dataSource1
    ).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn(null).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(
        ImmutableList.of(server)
    ).atLeastOnce();

    EasyMock.replay(inventoryView, server);
    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null);
    Response response = datasourcesResource.getTheDataSource("datasource1", null);
    Assert.assertEquals(200, response.getStatus());
    Map<String, Map<String, Object>> result = (Map<String, Map<String, Object>>) response.getEntity();
    Assert.assertEquals(1, ((Map) result.get("tiers").get(null)).get("segmentCount"));
    Assert.assertNotNull(result.get("segments"));
    Assert.assertEquals("2010-01-01T00:00:00.000Z", result.get("segments").get("minTime").toString());
    Assert.assertEquals("2010-01-02T00:00:00.000Z", result.get("segments").get("maxTime").toString());
    EasyMock.verify(inventoryView, server);
  }
}
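For orientation, a hedged mapping of these tests to the coordinator HTTP endpoints the resource serves (paths follow Druid's coordinator API conventions; they are not shown in this diff):

  GET /druid/coordinator/v1/datasources            -> datasource names (getQueryableDataSources(null, null))
  GET /druid/coordinator/v1/datasources?full       -> full DruidDataSource objects ("full", null)
  GET /druid/coordinator/v1/datasources?simple     -> per-datasource tier/segment summaries (null, "simple")
  GET /druid/coordinator/v1/datasources/{name}     -> a single datasource; 204 when unknown (getTheDataSource)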