Make hashed partitions spec default

- make hashed partitionsSpec the default partitions spec for 0.7
nishantmonu51 committed 2014-11-17 19:48:12 +05:30
parent 0b45942cff
commit edf0fc0851
2 changed files with 5 additions and 51 deletions

io/druid/indexer/partitions/PartitionsSpec.java

@@ -26,7 +26,7 @@ import com.fasterxml.jackson.annotation.JsonTypeInfo;
 import io.druid.indexer.HadoopDruidIndexerConfig;
 import io.druid.indexer.Jobby;
 
-@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = SingleDimensionPartitionsSpec.class)
+@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = HashedPartitionsSpec.class)
 @JsonSubTypes(value = {
     @JsonSubTypes.Type(name = "dimension", value = SingleDimensionPartitionsSpec.class),
     @JsonSubTypes.Type(name = "random", value = RandomPartitionsSpec.class),

io/druid/indexer/HadoopIngestionSpecTest.java

@@ -22,6 +22,7 @@ package io.druid.indexer;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Lists;
+import io.druid.indexer.partitions.HashedPartitionsSpec;
 import io.druid.metadata.MetadataStorageConnectorConfig;
 import io.druid.indexer.partitions.PartitionsSpec;
 import io.druid.indexer.partitions.RandomPartitionsSpec;
@@ -133,7 +134,7 @@ public class HadoopIngestionSpecTest
   }
 
   @Test
-  public void testPartitionsSpecAutoDimension()
+  public void testPartitionsSpecAutoDHashed()
   {
     final HadoopIngestionSpec schema;
@@ -167,55 +168,7 @@ public class HadoopIngestionSpecTest
     Assert.assertTrue(
         "partitionSpec",
-        partitionsSpec instanceof SingleDimensionPartitionsSpec
-    );
-  }
-
-  @Test
-  public void testPartitionsSpecSpecificDimensionLegacy()
-  {
-    final HadoopIngestionSpec schema;
-    try {
-      schema = jsonReadWriteRead(
-          "{"
-          + "\"partitionsSpec\":{"
-          + "   \"targetPartitionSize\":100,"
-          + "   \"partitionDimension\":\"foo\""
-          + " }"
-          + "}",
-          HadoopIngestionSpec.class
-      );
-    }
-    catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-
-    final PartitionsSpec partitionsSpec = schema.getTuningConfig().getPartitionsSpec();
-
-    Assert.assertEquals(
-        "isDeterminingPartitions",
-        partitionsSpec.isDeterminingPartitions(),
-        true
-    );
-
-    Assert.assertEquals(
-        "getTargetPartitionSize",
-        partitionsSpec.getTargetPartitionSize(),
-        100
-    );
-
-    Assert.assertEquals(
-        "getMaxPartitionSize",
-        partitionsSpec.getMaxPartitionSize(),
-        150
-    );
-
-    Assert.assertTrue("partitionsSpec", partitionsSpec instanceof SingleDimensionPartitionsSpec);
-
-    Assert.assertEquals(
-        "getPartitionDimension",
-        ((SingleDimensionPartitionsSpec) partitionsSpec).getPartitionDimension(),
-        "foo"
+        partitionsSpec instanceof HashedPartitionsSpec
     );
   }
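The deleted legacy test pinned down the old fallback: a type-less partitionsSpec became a SingleDimensionPartitionsSpec and honored partitionDimension. The renamed test above asserts the new fallback instead. A hedged sketch of that assertion, reusing the test class's jsonReadWriteRead helper (the test name and JSON body here are illustrative; the real test body is not shown in this diff):

  @Test
  public void testTypelessSpecNowHashed()
  {
    // Hypothetical test name; mirrors what testPartitionsSpecAutoDHashed asserts.
    final HadoopIngestionSpec schema;
    try {
      schema = jsonReadWriteRead(
          "{\"partitionsSpec\":{\"targetPartitionSize\":100}}",
          HadoopIngestionSpec.class
      );
    }
    catch (Exception e) {
      throw Throwables.propagate(e);
    }
    final PartitionsSpec partitionsSpec = schema.getTuningConfig().getPartitionsSpec();
    // No "type" given, so the new default kicks in.
    Assert.assertTrue("partitionsSpec", partitionsSpec instanceof HashedPartitionsSpec);
  }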
@@ -274,6 +227,7 @@ public class HadoopIngestionSpecTest
       schema = jsonReadWriteRead(
           "{"
           + "\"partitionsSpec\":{"
+          + "   \"type\":\"dimension\","
           + "   \"targetPartitionSize\":100,"
           + "   \"maxPartitionSize\":200,"
           + "   \"partitionDimension\":\"foo\""