mirror of https://github.com/apache/druid.git
Merge branch 'master' into improve-bard
commit 1e14ea7fab
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -181,7 +181,7 @@ public class DbConnector
     dataSource.setPassword(config.getDatabasePassword());
     dataSource.setUrl(config.getDatabaseConnectURI());

-    if (config.isValidationQuery()) {
+    if (config.useValidationQuery()) {
       dataSource.setValidationQuery(config.getValidationQuery());
       dataSource.setTestOnBorrow(true);
     }
@@ -44,7 +44,7 @@ public abstract class DbConnectorConfig

   @JsonProperty("useValidationQuery")
   @Config("druid.database.validation")
-  public boolean isValidationQuery() {
+  public boolean useValidationQuery() {
     return false;
   }

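Note on the rename above: only the Java accessor changes from isValidationQuery() to useValidationQuery(); the JSON property ("useValidationQuery") and the config key ("druid.database.validation") are untouched, so existing configurations keep working. Below is a minimal, self-contained sketch of how such a flag gates the pool's validation settings; the system-property lookup is an illustrative assumption, not part of this patch.

// Hedged sketch: how a boolean "use validation query" flag gates pool setup.
// The property lookup here is an assumption for illustration only; the real
// wiring lives in DbConnector/DbConnectorConfig shown in the hunks above.
public class ValidationFlagSketch
{
  // Mirrors the renamed accessor: false unless the key is set.
  static boolean useValidationQuery()
  {
    return Boolean.parseBoolean(System.getProperty("druid.database.validation", "false"));
  }

  public static void main(String[] args)
  {
    if (useValidationQuery()) {
      System.out.println("would set a validation query and testOnBorrow=true on the pool");
    } else {
      System.out.println("validation query disabled (the default)");
    }
  }
}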
@@ -9,7 +9,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -504,7 +504,6 @@ public class DeterminePartitionsJob implements Jobby
   public static class DeterminePartitionsDimSelectionReducer extends DeterminePartitionsDimSelectionBaseReducer
   {
     private static final double SHARD_COMBINE_THRESHOLD = 0.25;
-    private static final double SHARD_OVERSIZE_THRESHOLD = 1.5;
     private static final int HIGH_CARDINALITY_THRESHOLD = 3000000;

     @Override
@@ -672,7 +671,7 @@ public class DeterminePartitionsJob implements Jobby
       // Make sure none of these shards are oversized
       boolean oversized = false;
       for(final DimPartition partition : dimPartitions.partitions) {
-        if(partition.rows > config.getTargetPartitionSize() * SHARD_OVERSIZE_THRESHOLD) {
+        if(partition.rows > config.getMaxPartitionSize()) {
           log.info("Dimension[%s] has an oversized shard: %s", dimPartitions.dim, partition.shardSpec);
           oversized = true;
         }
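The two hunks above move the oversize cutoff out of the reducer: the hard-coded SHARD_OVERSIZE_THRESHOLD of 1.5 is deleted and the comparison now reads config.getMaxPartitionSize(), which is configurable and, per the PartitionsSpec changes further down, still defaults to 1.5 times targetPartitionSize when unset. A small, self-contained worked example of the new comparison follows; the row counts are made up for illustration.

// Worked example of the oversize check after this change (illustrative values).
public class OversizeCheckExample
{
  public static void main(String[] args)
  {
    long targetPartitionSize = 100;
    // Default when maxPartitionSize is not configured: 1.5 * target = 150.
    long maxPartitionSize = (long) (targetPartitionSize * 1.5);

    long[] shardRowCounts = {90, 140, 160};
    for (long rows : shardRowCounts) {
      // Same comparison as the hunk above, with the threshold now coming from config.
      boolean oversized = rows > maxPartitionSize;
      System.out.println(rows + " rows -> oversized=" + oversized);
    }
    // Prints: 90 -> false, 140 -> false, 160 -> true
  }
}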
@@ -236,7 +236,7 @@ public class HadoopDruidIndexerConfig
       this.partitionsSpec = partitionsSpec;
     } else {
       // Backwards compatibility
-      this.partitionsSpec = new PartitionsSpec(partitionDimension, targetPartitionSize, false);
+      this.partitionsSpec = new PartitionsSpec(partitionDimension, targetPartitionSize, null, false);
     }

     if(granularitySpec != null) {
@@ -431,6 +431,11 @@ public class HadoopDruidIndexerConfig
     return partitionsSpec.getTargetPartitionSize();
   }

+  public long getMaxPartitionSize()
+  {
+    return partitionsSpec.getMaxPartitionSize();
+  }
+
   public boolean isUpdaterJobSpecSet()
   {
     return (updaterJobSpec != null);
@@ -8,22 +8,30 @@ import javax.annotation.Nullable;

 public class PartitionsSpec
 {
+  private static final double DEFAULT_OVERSIZE_THRESHOLD = 1.5;
+
   @Nullable
   private final String partitionDimension;

   private final long targetPartitionSize;

+  private final long maxPartitionSize;
+
   private final boolean assumeGrouped;

   @JsonCreator
   public PartitionsSpec(
       @JsonProperty("partitionDimension") @Nullable String partitionDimension,
       @JsonProperty("targetPartitionSize") @Nullable Long targetPartitionSize,
+      @JsonProperty("maxPartitionSize") @Nullable Long maxPartitionSize,
       @JsonProperty("assumeGrouped") @Nullable Boolean assumeGrouped
   )
   {
     this.partitionDimension = partitionDimension;
     this.targetPartitionSize = targetPartitionSize == null ? -1 : targetPartitionSize;
+    this.maxPartitionSize = maxPartitionSize == null
+                            ? (long) (this.targetPartitionSize * DEFAULT_OVERSIZE_THRESHOLD)
+                            : maxPartitionSize;
     this.assumeGrouped = assumeGrouped == null ? false : assumeGrouped;
   }

@@ -46,6 +54,12 @@ public class PartitionsSpec
     return targetPartitionSize;
   }

+  @JsonProperty
+  public long getMaxPartitionSize()
+  {
+    return maxPartitionSize;
+  }
+
   @JsonProperty
   public boolean isAssumeGrouped()
   {
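The PartitionsSpec hunks above add maxPartitionSize as a first-class, JSON-mapped field: when the property is omitted it falls back to targetPartitionSize * DEFAULT_OVERSIZE_THRESHOLD (1.5), otherwise the explicit value wins. A short sketch of just that defaulting rule, with values matching the test assertions later in this diff (150 when omitted with a target of 100, 200 when given explicitly):

// Sketch of the defaulting rule added above; the real constructor argument order
// is (partitionDimension, targetPartitionSize, maxPartitionSize, assumeGrouped).
public class PartitionsSpecDefaultingExample
{
  static long resolveMaxPartitionSize(long targetPartitionSize, Long maxPartitionSize)
  {
    return maxPartitionSize == null
           ? (long) (targetPartitionSize * 1.5)  // DEFAULT_OVERSIZE_THRESHOLD
           : maxPartitionSize;
  }

  public static void main(String[] args)
  {
    System.out.println(resolveMaxPartitionSize(100, null)); // 150, as the tests below assert
    System.out.println(resolveMaxPartitionSize(100, 200L)); // 200, explicit value wins
  }
}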
@@ -24,6 +24,7 @@ import com.google.common.base.Throwables;
 import com.google.common.collect.Lists;
 import com.metamx.druid.indexer.granularity.UniformGranularitySpec;
 import com.metamx.druid.indexer.partitions.PartitionsSpec;
+import com.metamx.druid.indexer.updater.DbUpdaterJobSpec;
 import com.metamx.druid.jackson.DefaultObjectMapper;

 import org.joda.time.Interval;
@@ -39,8 +40,8 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
           "{"
           + " \"granularitySpec\":{"
           + " \"type\":\"uniform\","
           + " \"gran\":\"hour\","
@@ -74,7 +75,7 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
           "{"
           + "\"segmentGranularity\":\"day\","
           + "\"intervals\":[\"2012-02-01/P1D\"]"
@@ -137,7 +138,7 @@ public class HadoopDruidIndexerConfigTest
   public void testInvalidGranularityCombination() {
     boolean thrown = false;
     try {
-      final HadoopDruidIndexerConfig cfg = jsonMapper.readValue(
+      final HadoopDruidIndexerConfig cfg = jsonReadWriteRead(
           "{"
           + "\"segmentGranularity\":\"day\","
           + "\"intervals\":[\"2012-02-01/P1D\"],"
@@ -161,7 +162,7 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
          "{}",
          HadoopDruidIndexerConfig.class
      );
@@ -183,7 +184,7 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
           "{"
           + "\"partitionsSpec\":{"
           + " \"targetPartitionSize\":100"
@@ -221,7 +222,7 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
           "{"
           + "\"partitionsSpec\":{"
           + " \"targetPartitionSize\":100,"
@@ -248,6 +249,12 @@ public class HadoopDruidIndexerConfigTest
         100
     );

+    Assert.assertEquals(
+        "getMaxPartitionSize",
+        partitionsSpec.getMaxPartitionSize(),
+        150
+    );
+
     Assert.assertEquals(
         "getPartitionDimension",
         partitionsSpec.getPartitionDimension(),
@@ -260,7 +267,7 @@ public class HadoopDruidIndexerConfigTest
     final HadoopDruidIndexerConfig cfg;

     try {
-      cfg = jsonMapper.readValue(
+      cfg = jsonReadWriteRead(
           "{"
           + "\"targetPartitionSize\":100,"
           + "\"partitionDimension\":\"foo\""
@@ -285,6 +292,58 @@ public class HadoopDruidIndexerConfigTest
         100
     );

+    Assert.assertEquals(
+        "getMaxPartitionSize",
+        partitionsSpec.getMaxPartitionSize(),
+        150
+    );
+
+    Assert.assertEquals(
+        "getPartitionDimension",
+        partitionsSpec.getPartitionDimension(),
+        "foo"
+    );
+  }
+
+  @Test
+  public void testPartitionsSpecMaxPartitionSize() {
+    final HadoopDruidIndexerConfig cfg;
+
+    try {
+      cfg = jsonReadWriteRead(
+          "{"
+          + "\"partitionsSpec\":{"
+          + " \"targetPartitionSize\":100,"
+          + " \"maxPartitionSize\":200,"
+          + " \"partitionDimension\":\"foo\""
+          + " }"
+          + "}",
+          HadoopDruidIndexerConfig.class
+      );
+    } catch(Exception e) {
+      throw Throwables.propagate(e);
+    }
+
+    final PartitionsSpec partitionsSpec = cfg.getPartitionsSpec();
+
+    Assert.assertEquals(
+        "isDeterminingPartitions",
+        partitionsSpec.isDeterminingPartitions(),
+        true
+    );
+
+    Assert.assertEquals(
+        "getTargetPartitionSize",
+        partitionsSpec.getTargetPartitionSize(),
+        100
+    );
+
+    Assert.assertEquals(
+        "getMaxPartitionSize",
+        partitionsSpec.getMaxPartitionSize(),
+        200
+    );
+
     Assert.assertEquals(
         "getPartitionDimension",
         partitionsSpec.getPartitionDimension(),
@@ -296,7 +355,7 @@ public class HadoopDruidIndexerConfigTest
   public void testInvalidPartitionsCombination() {
     boolean thrown = false;
     try {
-      final HadoopDruidIndexerConfig cfg = jsonMapper.readValue(
+      final HadoopDruidIndexerConfig cfg = jsonReadWriteRead(
          "{"
          + "\"targetPartitionSize\":100,"
          + "\"partitionsSpec\":{"
@@ -311,4 +370,40 @@ public class HadoopDruidIndexerConfigTest

     Assert.assertTrue("Exception thrown", thrown);
   }
+
+  @Test
+  public void testDbUpdaterJobSpec() throws Exception
+  {
+    final HadoopDruidIndexerConfig cfg;
+
+    cfg = jsonReadWriteRead(
+        "{"
+        + "\"updaterJobSpec\":{\n"
+        + "  \"type\" : \"db\",\n"
+        + "  \"connectURI\" : \"jdbc:mysql://localhost/druid\",\n"
+        + "  \"user\" : \"rofl\",\n"
+        + "  \"password\" : \"p4ssw0rd\",\n"
+        + "  \"segmentTable\" : \"segments\"\n"
+        + " }"
+        + "}",
+        HadoopDruidIndexerConfig.class
+    );
+
+    final DbUpdaterJobSpec spec = (DbUpdaterJobSpec) cfg.getUpdaterJobSpec();
+    Assert.assertEquals("segments", spec.getSegmentTable());
+    Assert.assertEquals("jdbc:mysql://localhost/druid", spec.getDatabaseConnectURI());
+    Assert.assertEquals("rofl", spec.getDatabaseUser());
+    Assert.assertEquals("p4ssw0rd", spec.getDatabasePassword());
+    Assert.assertEquals(false, spec.useValidationQuery());
+  }
+
+  private <T> T jsonReadWriteRead(String s, Class<T> klass)
+  {
+    try {
+      return jsonMapper.readValue(jsonMapper.writeValueAsBytes(jsonMapper.readValue(s, klass)), klass);
+    }
+    catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
 }
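The tests now funnel every config string through the new jsonReadWriteRead helper, which parses the JSON, writes the resulting object back out with the same mapper, and parses it again; a field without a JSON binding would silently revert to its default on the second pass, which is exactly what the new maxPartitionSize assertions guard against. Below is a stand-alone illustration of that round-trip pattern using a throwaway bean; MyBean and the Jackson mapper shown here are illustrative assumptions, not part of the patch.

// Hedged illustration of the serialize-then-reparse round trip used above.
import com.fasterxml.jackson.databind.ObjectMapper;

public class RoundTripExample
{
  public static class MyBean
  {
    // Default that would "leak" back in if the field were not serialized.
    public long maxPartitionSize = 150;
  }

  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    MyBean parsed = mapper.readValue("{\"maxPartitionSize\":200}", MyBean.class);
    MyBean roundTripped = mapper.readValue(mapper.writeValueAsBytes(parsed), MyBean.class);
    System.out.println(roundTripped.maxPartitionSize); // 200: the value survives the round trip
  }
}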
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
pom.xml
@@ -23,7 +23,7 @@
     <groupId>com.metamx</groupId>
     <artifactId>druid</artifactId>
     <packaging>pom</packaging>
-    <version>0.4.13-SNAPSHOT</version>
+    <version>0.4.15-SNAPSHOT</version>
     <name>druid</name>
     <description>druid</description>
     <scm>
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -28,7 +28,7 @@
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -24,11 +24,11 @@
     <artifactId>druid-services</artifactId>
     <name>druid-services</name>
     <description>druid-services</description>
-    <version>0.4.13-SNAPSHOT</version>
+    <version>0.4.15-SNAPSHOT</version>
     <parent>
         <groupId>com.metamx</groupId>
         <artifactId>druid</artifactId>
-        <version>0.4.13-SNAPSHOT</version>
+        <version>0.4.15-SNAPSHOT</version>
     </parent>

     <dependencies>