Split SegmentInsertAction and SegmentTransactionalInsertAction for backwards compat. (#2922)

Fixes #2912.
Authored by Gian Merlino on 2016-05-04 13:54:34 -07:00; committed by Fangjin Yang
parent e067acd443
commit f8ddfb9a4b
6 changed files with 319 additions and 129 deletions
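The change restores SegmentInsertAction to its pre-transactional shape (segments only, returning the set of inserted segments) and moves the transactional publish, which carries start/end DataSourceMetadata and returns a SegmentPublishResult, into the new SegmentTransactionalInsertAction. The following is a minimal usage sketch, not part of this commit, assuming a TaskActionClient obtained from TaskToolbox#getTaskActionClient() and caller-supplied segments and metadata:

// Usage sketch only (assumptions noted above); class and method names here are illustrative.
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.SegmentTransactionalInsertAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.indexing.overlord.DataSourceMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.timeline.DataSegment;

import java.io.IOException;
import java.util.Set;

class SegmentPublishSketch
{
  // Plain insert: same request/response shape as before the transactional work; returns the
  // segments that were added to metadata storage.
  static Set<DataSegment> insert(TaskActionClient client, Set<DataSegment> segments) throws IOException
  {
    return client.submit(new SegmentInsertAction(segments));
  }

  // Transactional insert: only succeeds if the stored datasource metadata matches `start`;
  // on success the segments are inserted and `end` is recorded atomically.
  static SegmentPublishResult insertTransactionally(
      TaskActionClient client,
      Set<DataSegment> segments,
      DataSourceMetadata start,
      DataSourceMetadata end
  ) throws IOException
  {
    return client.submit(new SegmentTransactionalInsertAction(segments, start, end));
  }
}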

KafkaIndexTask.java

@@ -52,6 +52,7 @@ import io.druid.indexing.appenderator.ActionBasedUsedSegmentChecker;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.SegmentTransactionalInsertAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.indexing.common.task.AbstractTask;
import io.druid.indexing.common.task.TaskResource;
@@ -476,16 +477,16 @@ public class KafkaIndexTask extends AbstractTask implements ChatHandler
throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
}
final SegmentInsertAction action;
final SegmentTransactionalInsertAction action;
if (ioConfig.isUseTransaction()) {
action = new SegmentInsertAction(
action = new SegmentTransactionalInsertAction(
segments,
new KafkaDataSourceMetadata(ioConfig.getStartPartitions()),
new KafkaDataSourceMetadata(finalPartitions)
);
} else {
action = new SegmentInsertAction(segments, null, null);
action = new SegmentTransactionalInsertAction(segments, null, null);
}
log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

SegmentInsertAction.java

@@ -23,11 +23,8 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.collect.ImmutableSet;
import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.DataSourceMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.query.DruidMetrics;
import io.druid.timeline.DataSegment;
import java.io.IOException;
@@ -41,29 +38,16 @@ import java.util.Set;
* that the task cannot actually complete. Callers should avoid this by avoiding inserting too many segments in the
* same action.
*/
public class SegmentInsertAction implements TaskAction<SegmentPublishResult>
public class SegmentInsertAction implements TaskAction<Set<DataSegment>>
{
private final Set<DataSegment> segments;
private final DataSourceMetadata startMetadata;
private final DataSourceMetadata endMetadata;
public SegmentInsertAction(
Set<DataSegment> segments
)
{
this(segments, null, null);
}
@JsonCreator
public SegmentInsertAction(
@JsonProperty("segments") Set<DataSegment> segments,
@JsonProperty("startMetadata") DataSourceMetadata startMetadata,
@JsonProperty("endMetadata") DataSourceMetadata endMetadata
@JsonProperty("segments") Set<DataSegment> segments
)
{
this.segments = ImmutableSet.copyOf(segments);
this.startMetadata = startMetadata;
this.endMetadata = endMetadata;
}
@JsonProperty
@@ -72,21 +56,9 @@ public class SegmentInsertAction implements TaskAction<SegmentPublishResult>
return segments;
}
@JsonProperty
public DataSourceMetadata getStartMetadata()
public TypeReference<Set<DataSegment>> getReturnTypeReference()
{
return startMetadata;
}
@JsonProperty
public DataSourceMetadata getEndMetadata()
{
return endMetadata;
}
public TypeReference<SegmentPublishResult> getReturnTypeReference()
{
return new TypeReference<SegmentPublishResult>()
return new TypeReference<Set<DataSegment>>()
{
};
}
@@ -94,36 +66,12 @@ public class SegmentInsertAction implements TaskAction<SegmentPublishResult>
/**
* Behaves similarly to
* {@link io.druid.indexing.overlord.IndexerMetadataStorageCoordinator#announceHistoricalSegments(Set, DataSourceMetadata, DataSourceMetadata)},
* including the possibility of returning null in case of metadata transaction failure.
* with startMetadata and endMetadata both null.
*/
@Override
public SegmentPublishResult perform(Task task, TaskActionToolbox toolbox) throws IOException
public Set<DataSegment> perform(Task task, TaskActionToolbox toolbox) throws IOException
{
toolbox.verifyTaskLocks(task, segments);
final SegmentPublishResult retVal = toolbox.getIndexerMetadataStorageCoordinator().announceHistoricalSegments(
segments,
startMetadata,
endMetadata
);
// Emit metrics
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
.setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
.setDimension(DruidMetrics.TASK_TYPE, task.getType());
if (retVal.isSuccess()) {
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1));
} else {
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1));
}
for (DataSegment segment : retVal.getSegments()) {
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize()));
}
return retVal;
return new SegmentTransactionalInsertAction(segments, null, null).perform(task, toolbox).getSegments();
}
@Override
@@ -137,8 +85,6 @@ public class SegmentInsertAction implements TaskAction<SegmentPublishResult>
{
return "SegmentInsertAction{" +
"segments=" + segments +
", startMetadata=" + startMetadata +
", endMetadata=" + endMetadata +
'}';
}
}
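As the delegation in perform() above shows, the legacy action is now a thin wrapper over the transactional one with null metadata, so the two calls below are interchangeable (sketch; task, toolbox, and segments stand for the values handed to perform()):

// Equivalent after this change; both paths verify task locks and emit the same metrics,
// because SegmentInsertAction delegates to SegmentTransactionalInsertAction.
Set<DataSegment> viaLegacy = new SegmentInsertAction(segments).perform(task, toolbox);
Set<DataSegment> viaTransactional = new SegmentTransactionalInsertAction(segments, null, null)
    .perform(task, toolbox)
    .getSegments();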

SegmentTransactionalInsertAction.java (new file)

@@ -0,0 +1,143 @@
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.indexing.common.actions;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.collect.ImmutableSet;
import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.DataSourceMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.query.DruidMetrics;
import io.druid.timeline.DataSegment;
import java.io.IOException;
import java.util.Set;
/**
* Insert segments into metadata storage. The segment versions must all be less than or equal to a lock held by
* your task for the segment intervals.
* <p/>
* Word of warning: Very large "segments" sets can cause oversized audit log entries, which is bad because it means
* that the task cannot actually complete. Callers should avoid this by avoiding inserting too many segments in the
* same action.
*/
public class SegmentTransactionalInsertAction implements TaskAction<SegmentPublishResult>
{
private final Set<DataSegment> segments;
private final DataSourceMetadata startMetadata;
private final DataSourceMetadata endMetadata;
public SegmentTransactionalInsertAction(
Set<DataSegment> segments
)
{
this(segments, null, null);
}
@JsonCreator
public SegmentTransactionalInsertAction(
@JsonProperty("segments") Set<DataSegment> segments,
@JsonProperty("startMetadata") DataSourceMetadata startMetadata,
@JsonProperty("endMetadata") DataSourceMetadata endMetadata
)
{
this.segments = ImmutableSet.copyOf(segments);
this.startMetadata = startMetadata;
this.endMetadata = endMetadata;
}
@JsonProperty
public Set<DataSegment> getSegments()
{
return segments;
}
@JsonProperty
public DataSourceMetadata getStartMetadata()
{
return startMetadata;
}
@JsonProperty
public DataSourceMetadata getEndMetadata()
{
return endMetadata;
}
public TypeReference<SegmentPublishResult> getReturnTypeReference()
{
return new TypeReference<SegmentPublishResult>()
{
};
}
/**
* Behaves similarly to
* {@link io.druid.indexing.overlord.IndexerMetadataStorageCoordinator#announceHistoricalSegments(Set, DataSourceMetadata, DataSourceMetadata)}.
*/
@Override
public SegmentPublishResult perform(Task task, TaskActionToolbox toolbox) throws IOException
{
toolbox.verifyTaskLocks(task, segments);
final SegmentPublishResult retVal = toolbox.getIndexerMetadataStorageCoordinator().announceHistoricalSegments(
segments,
startMetadata,
endMetadata
);
// Emit metrics
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
.setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
.setDimension(DruidMetrics.TASK_TYPE, task.getType());
if (retVal.isSuccess()) {
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1));
} else {
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1));
}
for (DataSegment segment : retVal.getSegments()) {
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize()));
}
return retVal;
}
@Override
public boolean isAudited()
{
return true;
}
@Override
public String toString()
{
return "SegmentInsertAction{" +
"segments=" + segments +
", startMetadata=" + startMetadata +
", endMetadata=" + endMetadata +
'}';
}
}
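The transactional variant is what supports exactly-once style publishing: the caller names the datasource metadata it expects to find (startMetadata) and the metadata to record with the segments (endMetadata), and the insert is rejected when that expectation does not hold. A sketch using ObjectMetadata as an opaque payload, as the tests below do; client, segments, and the Guava ImmutableList import are assumed:

// Compare-and-swap publish (sketch). If the stored metadata does not equal the expected value,
// isSuccess() is false and no segments are inserted.
SegmentPublishResult result = client.submit(
    new SegmentTransactionalInsertAction(
        segments,
        new ObjectMetadata(ImmutableList.of(1)),  // metadata expected to be stored right now
        new ObjectMetadata(ImmutableList.of(2))   // metadata to record if the insert goes through
    )
);
if (!result.isSuccess()) {
  // Nothing was inserted; the caller decides whether to retry or fail the publish.
}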

TaskAction.java

@@ -33,6 +33,7 @@ import java.io.IOException;
@JsonSubTypes.Type(name = "lockList", value = LockListAction.class),
@JsonSubTypes.Type(name = "lockRelease", value = LockReleaseAction.class),
@JsonSubTypes.Type(name = "segmentInsertion", value = SegmentInsertAction.class),
@JsonSubTypes.Type(name = "segmentTransactionalInsert", value = SegmentTransactionalInsertAction.class),
@JsonSubTypes.Type(name = "segmentListUsed", value = SegmentListUsedAction.class),
@JsonSubTypes.Type(name = "segmentListUnused", value = SegmentListUnusedAction.class),
@JsonSubTypes.Type(name = "segmentNuke", value = SegmentNukeAction.class),

SegmentInsertActionTest.java

@@ -24,8 +24,6 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.druid.indexing.common.task.NoopTask;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.ObjectMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.LinearShardSpec;
import org.hamcrest.CoreMatchers;
@@ -34,7 +32,8 @@ import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import java.util.Set;
public class SegmentInsertActionTest
{
@@ -103,47 +102,6 @@ public class SegmentInsertActionTest
);
}
@Test
public void testTransactional() throws Exception
{
final Task task = new NoopTask(null, 0, 0, null, null, null);
actionTestKit.getTaskLockbox().add(task);
actionTestKit.getTaskLockbox().lock(task, new Interval(INTERVAL));
SegmentPublishResult result1 = new SegmentInsertAction(
ImmutableSet.of(SEGMENT1),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableList.of(1))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT1), true), result1);
SegmentPublishResult result2 = new SegmentInsertAction(
ImmutableSet.of(SEGMENT2),
new ObjectMetadata(ImmutableList.of(1)),
new ObjectMetadata(ImmutableList.of(2))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT2), true), result2);
Assert.assertEquals(
ImmutableSet.of(SEGMENT1, SEGMENT2),
ImmutableSet.copyOf(
actionTestKit.getMetadataStorageCoordinator()
.getUsedSegmentsForInterval(DATA_SOURCE, INTERVAL)
)
);
Assert.assertEquals(
new ObjectMetadata(ImmutableList.of(2)),
actionTestKit.getMetadataStorageCoordinator().getDataSourceMetadata(DATA_SOURCE)
);
}
@Test
public void testFailBadVersion() throws Exception
{
@@ -154,26 +112,7 @@ public class SegmentInsertActionTest
thrown.expect(IllegalStateException.class);
thrown.expectMessage(CoreMatchers.startsWith("Segments not covered by locks for task"));
SegmentPublishResult result = action.perform(task, actionTestKit.getTaskActionToolbox());
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT3), true), result);
}
@Test
public void testFailTransactional() throws Exception
{
final Task task = new NoopTask(null, 0, 0, null, null, null);
actionTestKit.getTaskLockbox().add(task);
actionTestKit.getTaskLockbox().lock(task, new Interval(INTERVAL));
SegmentPublishResult result = new SegmentInsertAction(
ImmutableSet.of(SEGMENT1),
new ObjectMetadata(ImmutableList.of(1)),
new ObjectMetadata(ImmutableList.of(2))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false), result);
final Set<DataSegment> segments = action.perform(task, actionTestKit.getTaskActionToolbox());
Assert.assertEquals(ImmutableSet.of(SEGMENT3), segments);
}
}

SegmentTransactionalInsertActionTest.java (new file)

@@ -0,0 +1,160 @@
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.indexing.common.actions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.druid.indexing.common.task.NoopTask;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.ObjectMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.LinearShardSpec;
import org.hamcrest.CoreMatchers;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class SegmentTransactionalInsertActionTest
{
@Rule
public ExpectedException thrown = ExpectedException.none();
@Rule
public TaskActionTestKit actionTestKit = new TaskActionTestKit();
private static final String DATA_SOURCE = "none";
private static final Interval INTERVAL = new Interval("2020/2020T01");
private static final String PARTY_YEAR = "1999";
private static final String THE_DISTANT_FUTURE = "3000";
private static final DataSegment SEGMENT1 = new DataSegment(
DATA_SOURCE,
INTERVAL,
PARTY_YEAR,
ImmutableMap.<String, Object>of(),
ImmutableList.<String>of(),
ImmutableList.<String>of(),
new LinearShardSpec(0),
9,
1024
);
private static final DataSegment SEGMENT2 = new DataSegment(
DATA_SOURCE,
INTERVAL,
PARTY_YEAR,
ImmutableMap.<String, Object>of(),
ImmutableList.<String>of(),
ImmutableList.<String>of(),
new LinearShardSpec(1),
9,
1024
);
private static final DataSegment SEGMENT3 = new DataSegment(
DATA_SOURCE,
INTERVAL,
THE_DISTANT_FUTURE,
ImmutableMap.<String, Object>of(),
ImmutableList.<String>of(),
ImmutableList.<String>of(),
new LinearShardSpec(1),
9,
1024
);
@Test
public void testTransactional() throws Exception
{
final Task task = new NoopTask(null, 0, 0, null, null, null);
actionTestKit.getTaskLockbox().add(task);
actionTestKit.getTaskLockbox().lock(task, new Interval(INTERVAL));
SegmentPublishResult result1 = new SegmentTransactionalInsertAction(
ImmutableSet.of(SEGMENT1),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableList.of(1))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT1), true), result1);
SegmentPublishResult result2 = new SegmentTransactionalInsertAction(
ImmutableSet.of(SEGMENT2),
new ObjectMetadata(ImmutableList.of(1)),
new ObjectMetadata(ImmutableList.of(2))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT2), true), result2);
Assert.assertEquals(
ImmutableSet.of(SEGMENT1, SEGMENT2),
ImmutableSet.copyOf(
actionTestKit.getMetadataStorageCoordinator()
.getUsedSegmentsForInterval(DATA_SOURCE, INTERVAL)
)
);
Assert.assertEquals(
new ObjectMetadata(ImmutableList.of(2)),
actionTestKit.getMetadataStorageCoordinator().getDataSourceMetadata(DATA_SOURCE)
);
}
@Test
public void testFailTransactional() throws Exception
{
final Task task = new NoopTask(null, 0, 0, null, null, null);
actionTestKit.getTaskLockbox().add(task);
actionTestKit.getTaskLockbox().lock(task, new Interval(INTERVAL));
SegmentPublishResult result = new SegmentTransactionalInsertAction(
ImmutableSet.of(SEGMENT1),
new ObjectMetadata(ImmutableList.of(1)),
new ObjectMetadata(ImmutableList.of(2))
).perform(
task,
actionTestKit.getTaskActionToolbox()
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false), result);
}
@Test
public void testFailBadVersion() throws Exception
{
final Task task = new NoopTask(null, 0, 0, null, null, null);
final SegmentTransactionalInsertAction action = new SegmentTransactionalInsertAction(ImmutableSet.of(SEGMENT3));
actionTestKit.getTaskLockbox().add(task);
actionTestKit.getTaskLockbox().lock(task, new Interval(INTERVAL));
thrown.expect(IllegalStateException.class);
thrown.expectMessage(CoreMatchers.startsWith("Segments not covered by locks for task"));
SegmentPublishResult result = action.perform(task, actionTestKit.getTaskActionToolbox());
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(SEGMENT3), true), result);
}
}