mirror of https://github.com/apache/druid.git
Logic adjustments to SeekableStreamIndexTaskRunner. (#7267)
* Logic adjustments to SeekableStreamIndexTaskRunner.

A mix of simplifications and bug fixes. They are intermingled because some of the bugs were made difficult to fix, and also more likely to happen in the first place, by how the code was structured. I tried to keep restructuring to a minimum. The changes are:

- Remove "initialOffsetsSnapshot", which was used to determine when to skip start offsets. Replace it with "lastReadOffsets", which I hope is more intuitive. (There is a connection: start offsets must be skipped if and only if they have already been read, either by a previous task or by a previous sequence in the same task, post-restoring.)
- Remove "isStartingSequenceOffsetsExclusive", because it should always be the opposite of isEndOffsetExclusive. The reason is that starts are exclusive exactly when the prior ends are inclusive: they must match up in that way for adjacent reads to link up properly.
- Don't call "seekToStartingSequence" after the initial seek. There is no reason to, since we expect to read continuous message streams throughout the task. And calling it makes offset-tracking logic trickier, so better to avoid the need for trickiness. I believe the call being here was causing a bug in Kinesis ingestion where a message might get double-read.
- Remove the "continue" calls in the main read loop. They are bad because they prevent keeping currOffsets and lastReadOffsets up to date, and prevent us from detecting that we have finished reading.
- Rework "verifyInitialRecordAndSkipExclusivePartition" into "verifyRecordInRange". It no longer has side effects. It does a sanity check on the message offset and also makes sure that it is not past the endOffsets.
- Rework "assignPartitions" to replace inline comparisons with "isRecordAlreadyRead" and "isMoreToReadBeforeReadingRecord" calls. I believe this fixes an off-by-one error with Kinesis where the last record would not get read. It also makes the logic easier to read.
- When doing the final publish, only adjust end offsets of the final sequence, rather than potentially adjusting any unpublished sequence. Adjusting sequences other than the last one is a mistake since it will extend their endOffsets beyond what they actually read. (I'm not sure if this was an issue in practice, since I'm not sure if real world situations would have more than one unpublished sequence.)
- Rename "isEndSequenceOffsetsExclusive" to "isEndOffsetExclusive". It's shorter and more clear, I think.
- Add equals/hashCode/toString methods to OrderedSequenceNumber.

Kafka test changes:

- Added a Kafka "testRestoreAtEndOffset" test to verify that restores at the very end of the task lifecycle still work properly.

Kinesis test changes:

- Renamed "testRunOnNothing" to "testRunOnSingletonRange". I think that given Kinesis semantics, the right behavior when start offset equals end offset (and there aren't exclusive partitions set) is to read that single offset. This is because they are both meant to be treated as inclusive.
- Adjusted "testRestoreAfterPersistingSequences" to expect one more message read. I believe the old test was wrong; it expected the task not to read message number 5.
- Adjusted "testRunContextSequenceAheadOfStartingOffsets" to use a checkpoint starting from 1 rather than 2. I believe the old test was wrong here too; it was expecting the task to start reading from the checkpointed offset, but it actually should have started reading from one past the checkpointed offset.
- Adjusted "testIncrementalHandOffReadsThroughEndOffsets" to expect 11 messages read instead of 12. It's starting at message 0 and reading up to 10, which should be 11 messages.

* Changes from code review.
This commit is contained in:
parent
69a6f1154a
commit
a8c7132482
@@ -84,7 +84,7 @@ public class IncrementalPublishingKafkaIndexTaskRunner extends SeekableStreamInd
 }
 
 @Override
-protected Long getSequenceNumberToStoreAfterRead(@NotNull Long sequenceNumber)
+protected Long getNextStartOffset(@NotNull Long sequenceNumber)
 {
 return sequenceNumber + 1;
 }
@@ -209,17 +209,11 @@ public class IncrementalPublishingKafkaIndexTaskRunner extends SeekableStreamInd
 }
 
 @Override
-protected boolean isEndSequenceOffsetsExclusive()
+protected boolean isEndOffsetExclusive()
 {
 return true;
 }
 
-@Override
-protected boolean isStartingSequenceOffsetsExclusive()
-{
-return false;
-}
-
 @Override
 protected boolean isEndOfShard(Long seqNum)
 {
@@ -706,18 +706,11 @@ public class LegacyKafkaIndexTaskRunner extends SeekableStreamIndexTaskRunner<In
 }
 
 @Override
-protected boolean isEndSequenceOffsetsExclusive()
+protected boolean isEndOffsetExclusive()
 {
-return false;
+return true;
 }
 
-@Override
-protected boolean isStartingSequenceOffsetsExclusive()
-{
-return false;
-}
-
-
 @Override
 protected SeekableStreamPartitions<Integer, Long> deserializePartitionsFromMetadata(
 ObjectMapper mapper,
@@ -805,7 +798,7 @@ public class LegacyKafkaIndexTaskRunner extends SeekableStreamIndexTaskRunner<In
 }
 
 @Override
-protected Long getSequenceNumberToStoreAfterRead(Long sequenceNumber)
+protected Long getNextStartOffset(Long sequenceNumber)
 {
 throw new UnsupportedOperationException();
 }
@@ -78,7 +78,7 @@ public class KinesisIndexTaskRunner extends SeekableStreamIndexTaskRunner<String
 
 
 @Override
-protected String getSequenceNumberToStoreAfterRead(String sequenceNumber)
+protected String getNextStartOffset(String sequenceNumber)
 {
 return sequenceNumber;
 }
@@ -160,17 +160,11 @@ public class KinesisIndexTaskRunner extends SeekableStreamIndexTaskRunner<String
 }
 
 @Override
-protected boolean isEndSequenceOffsetsExclusive()
+protected boolean isEndOffsetExclusive()
 {
 return false;
 }
 
-@Override
-protected boolean isStartingSequenceOffsetsExclusive()
-{
-return true;
-}
-
 @Override
 protected boolean isEndOfShard(String seqNum)
 {
@@ -83,6 +83,7 @@ import org.apache.druid.indexing.overlord.supervisor.SupervisorManager;
 import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner;
 import org.apache.druid.indexing.seekablestream.SeekableStreamPartitions;
 import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord;
+import org.apache.druid.indexing.seekablestream.common.StreamPartition;
 import org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor;
 import org.apache.druid.indexing.test.TestDataSegmentAnnouncer;
 import org.apache.druid.indexing.test.TestDataSegmentKiller;
@@ -1079,7 +1080,7 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 
 
 @Test(timeout = 120_000L)
-public void testRunOnNothing() throws Exception
+public void testRunOnSingletonRange() throws Exception
 {
 recordSupplier.assign(anyObject());
 expectLastCall().anyTimes();
@@ -1089,11 +1090,15 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 recordSupplier.seek(anyObject(), anyString());
 expectLastCall().anyTimes();
 
+expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 3)).once();
+
 recordSupplier.close();
 expectLastCall().once();
 
 replayAll();
 
+// When start and end offsets are the same, it means we need to read one message (since in Kinesis, end offsets
+// are inclusive).
 final KinesisIndexTask task = createTask(
 null,
 new KinesisIndexTaskIOConfig(
@@ -1128,12 +1133,12 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 verifyAll();
 
 // Check metrics
-Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getProcessed());
+Assert.assertEquals(1, task.getRunner().getRowIngestionMeters().getProcessed());
 Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getUnparseable());
 Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getThrownAway());
 
 // Check published metadata
-Assert.assertEquals(ImmutableSet.of(), publishedDescriptors());
+Assert.assertEquals(ImmutableSet.of(sd(task, "2010/P1D", 0)), publishedDescriptors());
 }
 
 
@@ -2099,14 +2104,11 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 @Test(timeout = 120_000L)
 public void testRestore() throws Exception
 {
-recordSupplier.assign(anyObject());
-expectLastCall().anyTimes();
-
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
-recordSupplier.seek(anyObject(), anyString());
-expectLastCall().anyTimes();
+final StreamPartition<String> streamPartition = StreamPartition.of(stream, shardId1);
+recordSupplier.assign(ImmutableSet.of(streamPartition));
+expectLastCall();
+recordSupplier.seek(streamPartition, "2");
+expectLastCall();
 
 expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 4))
 .once()
 .andReturn(Collections.emptyList())
@@ -2157,16 +2159,13 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 verifyAll();
 reset(recordSupplier);
 
-recordSupplier.assign(anyObject());
-expectLastCall().anyTimes();
-
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
-recordSupplier.seek(anyObject(), anyString());
-expectLastCall().anyTimes();
+recordSupplier.assign(ImmutableSet.of(streamPartition));
+expectLastCall();
+recordSupplier.seek(streamPartition, "3");
+expectLastCall();
 
 expect(recordSupplier.poll(anyLong())).andReturn(records.subList(3, 6)).once();
+recordSupplier.assign(ImmutableSet.of());
+expectLastCall();
 recordSupplier.close();
 expectLastCall();
 
@@ -2248,8 +2247,6 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 recordSupplier.assign(anyObject());
 expectLastCall().anyTimes();
 
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
 recordSupplier.seek(anyObject(), anyString());
 expectLastCall().anyTimes();
 
@@ -2321,9 +2318,6 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 recordSupplier.assign(anyObject());
 expectLastCall().anyTimes();
 
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
-
 recordSupplier.seek(anyObject(), anyString());
 expectLastCall().anyTimes();
 
@@ -2377,7 +2371,7 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 Assert.assertEquals(5, task1.getRunner().getRowIngestionMeters().getProcessed());
 Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getUnparseable());
 Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getThrownAway());
-Assert.assertEquals(1, task2.getRunner().getRowIngestionMeters().getProcessed());
+Assert.assertEquals(2, task2.getRunner().getRowIngestionMeters().getProcessed());
 Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getUnparseable());
 Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getThrownAway());
 
@@ -2386,8 +2380,9 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 SegmentDescriptor desc2 = sd(task1, "2009/P1D", 0);
 SegmentDescriptor desc3 = sd(task1, "2010/P1D", 0);
 SegmentDescriptor desc4 = sd(task1, "2011/P1D", 0);
-SegmentDescriptor desc5 = sd(task1, "2013/P1D", 0);
-Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4, desc5), publishedDescriptors());
+SegmentDescriptor desc5 = sd(task1, "2012/P1D", 0);
+SegmentDescriptor desc6 = sd(task1, "2013/P1D", 0);
+Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4, desc5, desc6), publishedDescriptors());
 Assert.assertEquals(
 new KinesisDataSourceMetadata(
 new SeekableStreamPartitions<>(stream, ImmutableMap.of(
@@ -2401,14 +2396,11 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 @Test(timeout = 120_000L)
 public void testRunWithPauseAndResume() throws Exception
 {
-recordSupplier.assign(anyObject());
-expectLastCall().anyTimes();
-
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
-recordSupplier.seek(anyObject(), anyString());
-expectLastCall().anyTimes();
+final StreamPartition<String> streamPartition = StreamPartition.of(stream, shardId1);
+recordSupplier.assign(ImmutableSet.of(streamPartition));
+expectLastCall();
+recordSupplier.seek(streamPartition, "2");
+expectLastCall();
 
 expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 5))
 .once()
 .andReturn(Collections.emptyList())
@@ -2475,14 +2467,8 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 
 reset(recordSupplier);
 
-recordSupplier.assign(anyObject());
-expectLastCall().anyTimes();
-
-expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes();
-
-recordSupplier.seek(anyObject(), anyString());
-expectLastCall().anyTimes();
+recordSupplier.assign(ImmutableSet.of());
+expectLastCall();
 
 recordSupplier.close();
 expectLastCall().once();
 
@@ -2546,8 +2532,8 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 
 final TreeMap<Integer, Map<String, String>> sequences = new TreeMap<>();
 // Here the sequence number is 1 meaning that one incremental handoff was done by the failed task
-// and this task should start reading from stream 2 for partition 0
-sequences.put(1, ImmutableMap.of(shardId1, "2"));
+// and this task should start reading from offset 2 for partition 0 (not offset 1, because end is inclusive)
+sequences.put(1, ImmutableMap.of(shardId1, "1"));
 final Map<String, Object> context = new HashMap<>();
 context.put("checkpoints", objectMapper.writerWithType(new TypeReference<TreeMap<Integer, Map<String, String>>>()
 {
@@ -2784,7 +2770,7 @@ public class KinesisIndexTaskTest extends EasyMockSupport
 throw new ISE("Task is not ready");
 }
 }
-catch (Exception e) {
+catch (Throwable e) {
 log.warn(e, "Task failed");
 return TaskStatus.failure(task.getId(), Throwables.getStackTraceAsString(e));
 }
@@ -142,6 +142,12 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 static final String METADATA_PUBLISH_PARTITIONS = "publishPartitions";
 
 private final Map<PartitionIdType, SequenceOffsetType> endOffsets;
+
+// lastReadOffsets are the last offsets that were read and processed.
+private final Map<PartitionIdType, SequenceOffsetType> lastReadOffsets = new HashMap<>();
+
+// currOffsets are what should become the start offsets of the next reader, if we stopped reading now. They are
+// initialized to the start offsets when the task begins.
 private final ConcurrentMap<PartitionIdType, SequenceOffsetType> currOffsets = new ConcurrentHashMap<>();
 private final ConcurrentMap<PartitionIdType, SequenceOffsetType> lastPersistedOffsets = new ConcurrentHashMap<>();
 
@@ -192,8 +198,6 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 private final Set<String> publishingSequences = Sets.newConcurrentHashSet();
 private final List<ListenableFuture<SegmentsAndMetadata>> publishWaitList = new ArrayList<>();
 private final List<ListenableFuture<SegmentsAndMetadata>> handOffWaitList = new ArrayList<>();
-private final Set<PartitionIdType> initialOffsetsSnapshot = new HashSet<>();
-private final Set<PartitionIdType> exclusiveStartingPartitions = new HashSet<>();
 
 private volatile DateTime startTime;
 private volatile Status status = Status.NOT_STARTED; // this is only ever set by the task runner thread (runThread)
@@ -272,7 +276,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 Map.Entry<Integer, Map<PartitionIdType, SequenceOffsetType>> previous = sequenceOffsets.next();
 while (sequenceOffsets.hasNext()) {
 Map.Entry<Integer, Map<PartitionIdType, SequenceOffsetType>> current = sequenceOffsets.next();
-sequences.add(new SequenceMetadata<>(
+addSequence(new SequenceMetadata<>(
 previous.getKey(),
 StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
 previous.getValue(),
@@ -283,7 +287,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 previous = current;
 exclusive = true;
 }
-sequences.add(new SequenceMetadata<>(
+addSequence(new SequenceMetadata<>(
 previous.getKey(),
 StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
 previous.getValue(),
@@ -292,7 +296,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 exclusive ? previous.getValue().keySet() : null
 ));
 } else {
-sequences.add(new SequenceMetadata<>(
+addSequence(new SequenceMetadata<>(
 0,
 StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), 0),
 ioConfig.getStartPartitions().getPartitionSequenceNumberMap(),
@@ -408,6 +412,21 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 );
 }
 
+// Initialize lastReadOffsets immediately after restoring currOffsets. This is only done when end offsets are
+// inclusive, because the point of initializing lastReadOffsets here is so we know when to skip the start record.
+// When end offsets are exclusive, we never skip the start record.
+if (!isEndOffsetExclusive()) {
+for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : currOffsets.entrySet()) {
+final boolean isAtStart = entry.getValue().equals(
+ioConfig.getStartPartitions().getPartitionSequenceNumberMap().get(entry.getKey())
+);
+
+if (!isAtStart || ioConfig.getExclusiveStartSequenceNumberPartitions().contains(entry.getKey())) {
+lastReadOffsets.put(entry.getKey(), entry.getValue());
+}
+}
+}
+
 // Set up committer.
 final Supplier<Committer> committerSupplier = () -> {
 final Map<PartitionIdType, SequenceOffsetType> snapshot = ImmutableMap.copyOf(currOffsets);
@@ -450,17 +469,14 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 status = Status.READING;
 Throwable caughtExceptionInner = null;
 
-initialOffsetsSnapshot.addAll(currOffsets.keySet());
-exclusiveStartingPartitions.addAll(ioConfig.getExclusiveStartSequenceNumberPartitions());
-
 try {
 while (stillReading) {
 if (possiblyPause()) {
 // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
-// partitions upon resuming. This is safe even if the end sequences have not been modified.
+// partitions upon resuming. Don't call "seekToStartingSequence" after "assignPartitions", because there's
+// no need to re-seek here. All we're going to be doing is dropping partitions.
 assignment = assignPartitions(recordSupplier);
 possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment, currOffsets);
-seekToStartingSequence(recordSupplier, assignment);
 
 if (assignment.isEmpty()) {
 log.info("All partitions have been fully read");
@@ -498,34 +514,17 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 
 SequenceMetadata sequenceToCheckpoint = null;
 for (OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType> record : records) {
-// for Kafka, the end offsets are exclusive, so skip it
-if (isEndSequenceOffsetsExclusive() &&
-createSequenceNumber(record.getSequenceNumber()).compareTo(
-createSequenceNumber(endOffsets.get(record.getPartitionId()))) >= 0) {
-continue;
-}
-
-// for the first message we receive, check that we were given a message with a sequenceNumber that matches
-// our expected starting sequenceNumber
-if (!verifyInitialRecordAndSkipExclusivePartition(record)) {
-continue;
-}
-
+final boolean shouldProcess = verifyRecordInRange(record.getPartitionId(), record.getSequenceNumber());
+
 log.trace(
-"Got stream[%s] partition[%s] sequence[%s].",
+"Got stream[%s] partition[%s] sequenceNumber[%s], shouldProcess[%s].",
 record.getStream(),
 record.getPartitionId(),
-record.getSequenceNumber()
+record.getSequenceNumber(),
+shouldProcess
 );
 
-if (isEndOfShard(record.getSequenceNumber())) {
-// shard is closed, applies to Kinesis only
-currOffsets.put(record.getPartitionId(), record.getSequenceNumber());
-} else if (createSequenceNumber(record.getSequenceNumber()).compareTo(
-createSequenceNumber(endOffsets.get(record.getPartitionId()))) <= 0) {
-
+if (shouldProcess) {
 try {
 final List<byte[]> valueBytess = record.getData();
 final List<InputRow> rows;
@@ -547,7 +546,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 
 if (sequenceToUse == null) {
 throw new ISE(
-"WTH?! cannot find any valid sequence for record with partition [%d] and sequence [%d]. Current sequences: %s",
+"WTH?! cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s",
 record.getPartitionId(),
 record.getSequenceNumber(),
 sequences
@@ -617,12 +616,18 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 // in kafka, we can easily get the next offset by adding 1, but for kinesis, there's no way
 // to get the next sequence number without having to make an expensive api call. So the behavior
 // here for kafka is to +1 while for kinesis we simply save the current sequence number
-currOffsets.put(record.getPartitionId(), getSequenceNumberToStoreAfterRead(record.getSequenceNumber()));
+lastReadOffsets.put(record.getPartitionId(), record.getSequenceNumber());
+currOffsets.put(record.getPartitionId(), getNextStartOffset(record.getSequenceNumber()));
 }
 
-if ((currOffsets.get(record.getPartitionId()).equals(endOffsets.get(record.getPartitionId()))
-|| isEndOfShard(currOffsets.get(record.getPartitionId())))
-&& assignment.remove(record.getStreamPartition())) {
+// Use record.getSequenceNumber() in the moreToRead check, since currOffsets might not have been
+// updated if we were skipping records for being beyond the end.
+final boolean moreToReadAfterThisRecord = isMoreToReadAfterReadingRecord(
+record.getSequenceNumber(),
+endOffsets.get(record.getPartitionId())
+);
+
+if (!moreToReadAfterThisRecord && assignment.remove(record.getStreamPartition())) {
 log.info("Finished reading stream[%s], partition[%s].", record.getStream(), record.getPartitionId());
 recordSupplier.assign(assignment);
 stillReading = !assignment.isEmpty();
@@ -688,11 +693,18 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 status = Status.PUBLISHING;
 }
 
-for (SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata : sequences) {
+for (int i = 0; i < sequences.size(); i++) {
+final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata = sequences.get(i);
 if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) {
-// this is done to prevent checks in sequence specific commit supplier from failing
-sequenceMetadata.setEndOffsets(currOffsets);
-sequenceMetadata.updateAssignments(this, currOffsets);
+final boolean isLast = i == (sequences.size() - 1);
+if (isLast) {
+// Shorten endOffsets of the last sequence to match currOffsets.
+sequenceMetadata.setEndOffsets(currOffsets);
+}
+
+// Update assignments of the sequence, which should clear them. (This will be checked later, when the
+// Committer is built.)
+sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadAfterReadingRecord);
 publishingSequences.add(sequenceMetadata.getSequenceName());
 // persist already done in finally, so directly add to publishQueue
 publishAndRegisterHandoff(sequenceMetadata);
@@ -795,7 +807,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
 toolbox.getDataSegmentServerAnnouncer().unannounce();
 }
-catch (Exception e) {
+catch (Throwable e) {
 if (caughtExceptionOuter != null) {
 caughtExceptionOuter.addSuppressed(e);
 } else {
@@ -912,7 +924,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 @Override
 public void onFailure(Throwable t)
 {
-log.error(t, "Error while publishing segments for sequence[%s]", sequenceMetadata);
+log.error(t, "Error while publishing segments for sequenceNumber[%s]", sequenceMetadata);
 handoffFuture.setException(t);
 }
 }
@@ -990,7 +1002,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 throws InterruptedException
 {
 for (SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata : sequences) {
-sequenceMetadata.updateAssignments(this, currOffsets);
+sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadBeforeReadingRecord);
 if (!sequenceMetadata.isOpen() && !publishingSequences.contains(sequenceMetadata.getSequenceName())) {
 publishingSequences.add(sequenceMetadata.getSequenceName());
 try {
@@ -1016,19 +1028,21 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 {
 final Set<StreamPartition<PartitionIdType>> assignment = new HashSet<>();
 for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : currOffsets.entrySet()) {
-final SequenceOffsetType endOffset = endOffsets.get(entry.getKey());
-if (isEndOfShard(endOffset)
-|| SeekableStreamPartitions.NO_END_SEQUENCE_NUMBER.equals(endOffset)
-|| createSequenceNumber(entry.getValue()).compareTo(createSequenceNumber(endOffset)) < 0) {
-assignment.add(StreamPartition.of(stream, entry.getKey()));
-} else if (entry.getValue().equals(endOffset)) {
-log.info("Finished reading partition[%s].", entry.getKey());
-} else {
-throw new ISE(
-"WTF?! Cannot start from sequence[%,d] > endOffset[%,d]",
-entry.getValue(),
+final PartitionIdType partition = entry.getKey();
+final SequenceOffsetType currOffset = entry.getValue();
+final SequenceOffsetType endOffset = endOffsets.get(partition);
+
+if (!isRecordAlreadyRead(partition, endOffset) && isMoreToReadBeforeReadingRecord(currOffset, endOffset)) {
+log.info(
+"Adding partition[%s], start[%s] -> end[%s] to assignment.",
+partition,
+currOffset,
 endOffset
 );
+
+assignment.add(StreamPartition.of(stream, partition));
+} else {
+log.info("Finished reading partition[%s].", partition);
 }
 }
@@ -1037,6 +1051,77 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 return assignment;
 }
 
+private void addSequence(final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata)
+{
+// Sanity check that the start of the new sequence matches up with the end of the prior sequence.
+for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : sequenceMetadata.getStartOffsets().entrySet()) {
+final PartitionIdType partition = entry.getKey();
+final SequenceOffsetType startOffset = entry.getValue();
+
+if (!sequences.isEmpty()) {
+final SequenceOffsetType priorOffset = sequences.get(sequences.size() - 1).endOffsets.get(partition);
+
+if (!startOffset.equals(priorOffset)) {
+throw new ISE(
+"New sequence startOffset[%s] does not equal expected prior offset[%s]",
+startOffset,
+priorOffset
+);
+}
+}
+}
+
+// Actually do the add.
+sequences.add(sequenceMetadata);
+}
+
+/**
+ * Returns true if the given record has already been read, based on lastReadOffsets.
+ */
+private boolean isRecordAlreadyRead(
+final PartitionIdType recordPartition,
+final SequenceOffsetType recordSequenceNumber
+)
+{
+final SequenceOffsetType lastReadOffset = lastReadOffsets.get(recordPartition);
+
+if (lastReadOffset == null) {
+return false;
+} else {
+return createSequenceNumber(recordSequenceNumber).compareTo(createSequenceNumber(lastReadOffset)) <= 0;
+}
+}
+
+/**
+ * Returns true if, given that we want to start reading from recordSequenceNumber and end at endSequenceNumber, there
+ * is more left to read. Used in pre-read checks to determine if there is anything left to read.
+ */
+private boolean isMoreToReadBeforeReadingRecord(
+final SequenceOffsetType recordSequenceNumber,
+final SequenceOffsetType endSequenceNumber
+)
+{
+final int compareToEnd = createSequenceNumber(recordSequenceNumber)
+.compareTo(createSequenceNumber(endSequenceNumber));
+
+return isEndOffsetExclusive() ? compareToEnd < 0 : compareToEnd <= 0;
+}
+
+/**
+ * Returns true if, given that recordSequenceNumber has already been read and we want to end at endSequenceNumber,
+ * there is more left to read. Used in post-read checks to determine if there is anything left to read.
+ */
+private boolean isMoreToReadAfterReadingRecord(
+final SequenceOffsetType recordSequenceNumber,
+final SequenceOffsetType endSequenceNumber
+)
+{
+final int compareNextToEnd = createSequenceNumber(getNextStartOffset(recordSequenceNumber))
+.compareTo(createSequenceNumber(endSequenceNumber));
+
+// Unlike isMoreToReadBeforeReadingRecord, we don't care if the end is exclusive or not. If we read it, we're done.
+return compareNextToEnd < 0;
+}
+
 private void seekToStartingSequence(
 RecordSupplier<PartitionIdType, SequenceOffsetType> recordSupplier,
@@ -1045,7 +1130,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 {
 for (final StreamPartition<PartitionIdType> partition : partitions) {
 final SequenceOffsetType sequence = currOffsets.get(partition.getPartitionId());
-log.info("Seeking partition[%s] to sequence[%s].", partition.getPartitionId(), sequence);
+log.info("Seeking partition[%s] to sequenceNumber[%s].", partition.getPartitionId(), sequence);
 recordSupplier.seek(partition, sequence);
 }
 }
@@ -1104,7 +1189,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 if (tuningConfig.isLogParseExceptions()) {
 log.error(
 pe,
-"Encountered parse exception on row from partition[%s] sequence[%s]",
+"Encountered parse exception on row from partition[%s] sequenceNumber[%s]",
 record.getPartitionId(),
 record.getSequenceNumber()
 );
@@ -1366,17 +1451,20 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 Preconditions.checkState(sequenceNumbers.size() > 0, "WTH?! No Sequences found to set end sequences");
 
 final SequenceMetadata<PartitionIdType, SequenceOffsetType> latestSequence = sequences.get(sequences.size() - 1);
-// if a partition has not been read yet (contained in initialOffsetsSnapshot), then
-// do not mark the starting sequence number as exclusive
-Set<PartitionIdType> exclusivePartitions = sequenceNumbers.keySet()
-.stream()
-.filter(x -> !initialOffsetsSnapshot.contains(x)
-|| ioConfig.getExclusiveStartSequenceNumberPartitions()
-.contains(x))
-.collect(Collectors.toSet());
+final Set<PartitionIdType> exclusiveStartPartitions;
+
+if (isEndOffsetExclusive()) {
+// When end offsets are exclusive, there's no need for marking the next sequence as having any
+// exclusive-start partitions. It should always start from the end offsets of the prior sequence.
+exclusiveStartPartitions = Collections.emptySet();
+} else {
+// When end offsets are inclusive, we must mark all partitions as exclusive-start, to avoid reading
+// their final messages (which have already been read).
+exclusiveStartPartitions = sequenceNumbers.keySet();
+}
 
 if ((latestSequence.getStartOffsets().equals(sequenceNumbers)
-&& latestSequence.getExclusiveStartPartitions().equals(exclusivePartitions)
+&& latestSequence.getExclusiveStartPartitions().equals(exclusiveStartPartitions)
 && !finish)
 || (latestSequence.getEndOffsets().equals(sequenceNumbers) && finish)) {
 log.warn("Ignoring duplicate request, end sequences already set for sequences [%s]", sequenceNumbers);
@@ -1416,19 +1504,17 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 log.info("Updating endOffsets from [%s] to [%s]", endOffsets, sequenceNumbers);
 endOffsets.putAll(sequenceNumbers);
 } else {
-exclusiveStartingPartitions.addAll(exclusivePartitions);
-
 // create new sequence
+log.info("Creating new sequence with startOffsets [%s] and endOffsets [%s]", sequenceNumbers, endOffsets);
 final SequenceMetadata<PartitionIdType, SequenceOffsetType> newSequence = new SequenceMetadata<>(
 latestSequence.getSequenceId() + 1,
 StringUtils.format("%s_%d", ioConfig.getBaseSequenceName(), latestSequence.getSequenceId() + 1),
 sequenceNumbers,
 endOffsets,
 false,
-exclusivePartitions
+exclusiveStartPartitions
 );
-sequences.add(newSequence);
-initialOffsetsSnapshot.addAll(sequenceNumbers.keySet());
+addSequence(newSequence);
 }
 persistSequences();
 }
@@ -1582,45 +1668,47 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 return startTime;
 }
 
-private boolean verifyInitialRecordAndSkipExclusivePartition(
-final OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType> record
+/**
+ * This method does two things:
+ *
+ * 1) Verifies that the sequence numbers we read are at least as high as those read previously, and throws an
+ * exception if not.
+ * 2) Returns false if we should skip this record because it's either (a) the first record in a partition that we are
+ * needing to be exclusive on; (b) too late to read, past the endOffsets.
+ */
+private boolean verifyRecordInRange(
+final PartitionIdType partition,
+final SequenceOffsetType recordOffset
 )
 {
-// Check only for the first record among the record batch.
-if (initialOffsetsSnapshot.contains(record.getPartitionId())) {
-final SequenceOffsetType currOffset = Preconditions.checkNotNull(
-currOffsets.get(record.getPartitionId()),
-"Current offset is null for sequenceNumber[%s] and partitionId[%s]",
-record.getSequenceNumber(),
-record.getPartitionId()
-);
-final OrderedSequenceNumber<SequenceOffsetType> recordSequenceNumber = createSequenceNumber(
-record.getSequenceNumber()
-);
-final OrderedSequenceNumber<SequenceOffsetType> currentSequenceNumber = createSequenceNumber(
-currOffset
-);
-if (recordSequenceNumber.compareTo(currentSequenceNumber) < 0) {
-throw new ISE(
-"sequenceNumber of the start record[%s] is smaller than current sequenceNumber[%s] for partition[%s]",
-record.getSequenceNumber(),
-currOffset,
-record.getPartitionId()
-);
-}
-
-// Remove the mark to notify that this partition has been read.
-initialOffsetsSnapshot.remove(record.getPartitionId());
-
-// check exclusive starting sequence
-if (isStartingSequenceOffsetsExclusive() && exclusiveStartingPartitions.contains(record.getPartitionId())) {
-log.info("Skipping starting sequenceNumber for partition[%s] marked exclusive", record.getPartitionId());
-
-return false;
-}
-}
-
-return true;
+// Verify that the record is at least as high as its currOffset.
+final SequenceOffsetType currOffset = Preconditions.checkNotNull(
+currOffsets.get(partition),
+"Current offset is null for sequenceNumber[%s] and partition[%s]",
+recordOffset,
+partition
+);
+
+final OrderedSequenceNumber<SequenceOffsetType> recordSequenceNumber = createSequenceNumber(recordOffset);
+final OrderedSequenceNumber<SequenceOffsetType> currentSequenceNumber = createSequenceNumber(currOffset);
+
+final int comparisonToCurrent = recordSequenceNumber.compareTo(currentSequenceNumber);
+if (comparisonToCurrent < 0) {
+throw new ISE(
+"Record sequenceNumber[%s] is smaller than current sequenceNumber[%s] for partition[%s]",
+recordOffset,
+currOffset,
+partition
+);
+}
+
+// Check if the record has already been read.
+if (isRecordAlreadyRead(partition, recordOffset)) {
+return false;
+}
+
+// Finally, check if this record comes before the endOffsets for this partition.
+return isMoreToReadBeforeReadingRecord(recordSequenceNumber.get(), endOffsets.get(partition));
 }
 
 /**
@@ -1645,16 +1733,14 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 ) throws IOException;
 
 /**
- * Calculates the sequence number used to update `currentOffsets` after finished reading a record.
- * In Kafka this returns sequenceNumeber + 1 since that's the next expected offset
- * In Kinesis this simply returns sequenceNumber, since the sequence numbers in Kinesis are not
- * contiguous and finding the next sequence number requires an expensive API call
+ * Calculates the sequence number used to update currOffsets after finished reading a record.
+ * This is what would become the start offsets of the next reader, if we stopped reading now.
 *
 * @param sequenceNumber the sequence number that has already been processed
 *
 * @return next sequence number to be stored
 */
-protected abstract SequenceOffsetType getSequenceNumberToStoreAfterRead(SequenceOffsetType sequenceNumber);
+protected abstract SequenceOffsetType getNextStartOffset(SequenceOffsetType sequenceNumber);
 
 /**
 * deserializes stored metadata into SeekableStreamPartitions
@@ -1726,14 +1812,7 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
 * In Kafka, the endOffsets are exclusive, so skip it.
 * In Kinesis the endOffsets are inclusive
 */
-protected abstract boolean isEndSequenceOffsetsExclusive();
+protected abstract boolean isEndOffsetExclusive();
 
-/**
- * In Kafka, the startingOffsets are inclusive.
- * In Kinesis, the startingOffsets are exclusive, except for the first
- * partition we read from stream
- */
-protected abstract boolean isStartingSequenceOffsetsExclusive();
-
 protected abstract TypeReference<List<SequenceMetadata<PartitionIdType, SequenceOffsetType>>> getSequenceMetadataTypeReference();
 }
@@ -143,9 +143,9 @@ public class SeekableStreamPartitions<PartitionIdType, SequenceOffsetType>
 @Override
 public String toString()
 {
-return "SeekableStreamPartitions{" +
-"stream/topic='" + stream + '\'' +
-", partitionSequenceNumberMap/partitionOffsetMap=" + partitionIdToSequenceNumberMap +
+return getClass().getSimpleName() + "{" +
+"stream='" + stream + '\'' +
+", partitionSequenceNumberMap=" + partitionIdToSequenceNumberMap +
 '}';
 }
 }
@@ -38,6 +38,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.BiFunction;
 
 public class SequenceMetadata<PartitionIdType, SequenceOffsetType>
 {
@@ -148,17 +149,16 @@ public class SequenceMetadata<PartitionIdType, SequenceOffsetType>
 }
 
 void updateAssignments(
-SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOffsetType> runner,
-Map<PartitionIdType, SequenceOffsetType> nextPartitionOffset
+Map<PartitionIdType, SequenceOffsetType> currOffsets,
+BiFunction<SequenceOffsetType, SequenceOffsetType, Boolean> moreToReadFn
 )
 {
 lock.lock();
 try {
 assignments.clear();
-nextPartitionOffset.forEach((key, value) -> {
+currOffsets.forEach((key, value) -> {
 SequenceOffsetType endOffset = endOffsets.get(key);
-if (SeekableStreamPartitions.NO_END_SEQUENCE_NUMBER.equals(endOffset)
-|| runner.createSequenceNumber(endOffset).compareTo(runner.createSequenceNumber(nextPartitionOffset.get(key))) > 0) {
+if (moreToReadFn.apply(value, endOffset)) {
 assignments.add(key);
 }
 });
@@ -188,14 +188,15 @@ public class SequenceMetadata<PartitionIdType, SequenceOffsetType>
 return false;
 }
 boolean ret;
-if (runner.isStartingSequenceOffsetsExclusive()) {
+if (!runner.isEndOffsetExclusive()) {
+// Inclusive endOffsets mean that we must skip the first record of any partition that has been read before.
 ret = recordOffset.compareTo(partitionStartOffset)
 >= (getExclusiveStartPartitions().contains(record.getPartitionId()) ? 1 : 0);
 } else {
 ret = recordOffset.compareTo(partitionStartOffset) >= 0;
 }
 
-if (runner.isEndSequenceOffsetsExclusive()) {
+if (runner.isEndOffsetExclusive()) {
 ret &= recordOffset.compareTo(partitionEndOffset) < 0;
 } else {
 ret &= recordOffset.compareTo(partitionEndOffset) <= 0;
@@ -19,6 +19,7 @@
 
 package org.apache.druid.indexing.seekablestream.common;
 
+import java.util.Objects;
 
 /**
 * Represents a Kafka/Kinesis stream sequence number. Mainly used to do
@@ -51,4 +52,33 @@ public abstract class OrderedSequenceNumber<SequenceOffsetType>
 {
 return isExclusive;
 }
+
+@Override
+public boolean equals(Object o)
+{
+if (this == o) {
+return true;
+}
+if (o == null || getClass() != o.getClass()) {
+return false;
+}
+OrderedSequenceNumber<?> that = (OrderedSequenceNumber<?>) o;
+return isExclusive == that.isExclusive &&
+Objects.equals(sequenceNumber, that.sequenceNumber);
+}
+
+@Override
+public int hashCode()
+{
+return Objects.hash(sequenceNumber, isExclusive);
+}
+
+@Override
+public String toString()
+{
+return getClass().getSimpleName() + "{" +
+"sequenceNumber=" + sequenceNumber +
+", isExclusive=" + isExclusive +
+'}';
+}
 }