Improve OvershadowableManager performance (#9441)

* Use the iterator instead of higherKey(); use the iterator API instead of stream

* Fix tests; fix a concurrency bug in timeline

* fix test

* add tests for findNonOvershadowedObjectsInInterval

* fix test

* add missing tests; fix a bug in QueueEntry

* equals tests

* fix test
Authored by Jihoon Son on 2020-03-10 13:22:19 -07:00; committed by GitHub.
parent 7e0e767cc2
commit 7401bb3f93
19 changed files with 561 additions and 258 deletions
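The recurring change below swaps java.util.stream pipelines for Guava's FluentIterable and Iterators on hot timeline paths, trading per-call stream and collector setup for plain iterator traversal. As a minimal sketch of that pattern, assuming a toy List<List<Integer>> in place of the timeline holders (illustration only, not code from this commit):

import com.google.common.collect.FluentIterable;

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class StreamVsFluentIterable
{
  // Before: each call builds a stream pipeline plus collector state.
  static Set<Integer> viaStream(List<List<Integer>> holders)
  {
    return holders.stream()
                  .flatMap(List::stream)
                  .collect(Collectors.toSet());
  }

  // After: FluentIterable walks plain iterators; transformAndConcat is the flatMap analogue.
  static Set<Integer> viaFluentIterable(List<List<Integer>> holders)
  {
    return FluentIterable.from(holders)
                         .transformAndConcat(list -> list)
                         .toSet();
  }
}

Both methods return equal sets; the difference is purely in the machinery allocated per call.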


@@ -67,7 +67,7 @@ public class VersionedIntervalTimelineBenchmark
   @Param({"10", "100", "1000"})
   private int numInitialRootGenSegmentsPerInterval;
 
-  @Param({"1", "5"})
+  @Param({"1", "2"})
   private int numNonRootGenerations;
 
   @Param({"false", "true"})


@@ -22,12 +22,12 @@ package org.apache.druid.timeline;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
 import com.google.common.collect.Iterators;
 import com.google.errorprone.annotations.concurrent.GuardedBy;
 import org.apache.druid.java.util.common.DateTimes;
 import org.apache.druid.java.util.common.UOE;
 import org.apache.druid.java.util.common.guava.Comparators;
-import org.apache.druid.timeline.partition.ImmutablePartitionHolder;
 import org.apache.druid.timeline.partition.PartitionChunk;
 import org.apache.druid.timeline.partition.PartitionHolder;
 import org.apache.druid.utils.CollectionUtils;
@@ -158,14 +158,17 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshadowable<ObjectType>>
   /**
    * Computes a set with all objects falling within the specified interval which are at least partially "visible" in
    * this interval (that is, are not fully overshadowed within this interval).
+   *
+   * Note that this method returns a set of {@link ObjectType}. Duplicate objects in different time chunks will be
+   * removed in the result.
    */
   public Set<ObjectType> findNonOvershadowedObjectsInInterval(Interval interval, Partitions completeness)
   {
-    return lookup(interval, completeness)
-        .stream()
-        .flatMap(timelineObjectHolder -> timelineObjectHolder.getObject().stream())
-        .map(PartitionChunk::getObject)
-        .collect(Collectors.toSet());
+    return FluentIterable
+        .from(lookup(interval, completeness))
+        .transformAndConcat(TimelineObjectHolder::getObject)
+        .transform(PartitionChunk::getObject)
+        .toSet();
   }
 
   public void add(final Interval interval, VersionType version, PartitionChunk<ObjectType> object)
@@ -278,7 +281,7 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshadowable<ObjectType>>
       if (entry.getKey().equals(interval) || entry.getKey().contains(interval)) {
         TimelineEntry foundEntry = entry.getValue().get(version);
         if (foundEntry != null) {
-          return new ImmutablePartitionHolder<>(foundEntry.getPartitionHolder());
+          return foundEntry.getPartitionHolder().asImmutable();
         }
       }
     }
@ -362,7 +365,7 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshado
entry.getTrueInterval(), entry.getTrueInterval(),
entry.getTrueInterval(), entry.getTrueInterval(),
entry.getVersion(), entry.getVersion(),
new PartitionHolder<>(entry.getPartitionHolder()) PartitionHolder.copyWithOnlyVisibleChunks(entry.getPartitionHolder())
); );
} }
@@ -381,9 +384,13 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshadowable<ObjectType>>
     final Set<TimelineObjectHolder<VersionType, ObjectType>> overshadowedObjects = overshadowedPartitionsTimeline
         .values()
         .stream()
-        .flatMap(
-            (Map<VersionType, TimelineEntry> entry) -> entry.values().stream().map(this::timelineEntryToObjectHolder)
-        )
+        .flatMap((Map<VersionType, TimelineEntry> entry) -> entry.values().stream())
+        .map(entry -> new TimelineObjectHolder<>(
+            entry.getTrueInterval(),
+            entry.getTrueInterval(),
+            entry.getVersion(),
+            PartitionHolder.deepCopy(entry.getPartitionHolder())
+        ))
         .collect(Collectors.toSet());
 
     // 2. Visible timelineEntries can also have overshadowed objects. Add them to the result too.
@@ -482,7 +489,12 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshadowable<ObjectType>>
         if (versionCompare > 0) {
           return false;
         } else if (versionCompare == 0) {
-          if (timelineEntry.partitionHolder.stream().noneMatch(chunk -> chunk.getObject().overshadows(object))) {
+          // Intentionally use the Iterators API instead of the stream API for performance.
+          //noinspection ConstantConditions
+          final boolean nonOvershadowedObject = Iterators.all(
+              timelineEntry.partitionHolder.iterator(), chunk -> !chunk.getObject().overshadows(object)
+          );
+          if (nonOvershadowedObject) {
             return false;
           }
         }
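The rewrite above leans on a De Morgan identity: stream().noneMatch(pred) yields the same boolean as Iterators.all(iterator, x -> !pred(x)), so the "no chunk overshadows the object" check survives the move to the iterator API. A self-contained sketch of the equivalence (toy integer predicate, not commit code):

import com.google.common.collect.Iterators;

import java.util.List;

class NoneMatchEquivalence
{
  public static void main(String[] args)
  {
    List<Integer> values = List.of(1, 2, 3);
    // noneMatch(v > 5) is equivalent to all(!(v > 5)).
    boolean viaStream = values.stream().noneMatch(v -> v > 5);
    boolean viaIterators = Iterators.all(values.iterator(), v -> !(v > 5));
    System.out.println(viaStream && viaIterators); // true
  }
}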
@@ -724,7 +736,7 @@ public class VersionedIntervalTimeline<VersionType, ObjectType extends Overshadowable<ObjectType>>
             timelineInterval,
             val.getTrueInterval(),
             val.getVersion(),
-            new PartitionHolder<>(val.getPartitionHolder())
+            PartitionHolder.copyWithOnlyVisibleChunks(val.getPartitionHolder())
         )
     );
   }


@@ -45,11 +45,21 @@ class AtomicUpdateGroup<T extends Overshadowable<T>> implements Overshadowable<AtomicUpdateGroup<T>>
   // This may matter if there are a lot of segments to keep in memory as in brokers or the coordinator.
   private final List<PartitionChunk<T>> chunks = new ArrayList<>();
 
+  static <T extends Overshadowable<T>> AtomicUpdateGroup<T> copy(AtomicUpdateGroup<T> group)
+  {
+    return new AtomicUpdateGroup<>(group.chunks);
+  }
+
   AtomicUpdateGroup(PartitionChunk<T> chunk)
   {
     this.chunks.add(chunk);
   }
 
+  private AtomicUpdateGroup(List<PartitionChunk<T>> chunks)
+  {
+    this.chunks.addAll(chunks);
+  }
+
   public void add(PartitionChunk<T> chunk)
   {
     if (isFull()) {


@@ -25,9 +25,9 @@ import org.apache.druid.timeline.Overshadowable;
  */
 public class ImmutablePartitionHolder<T extends Overshadowable<T>> extends PartitionHolder<T>
 {
-  public ImmutablePartitionHolder(PartitionHolder<T> partitionHolder)
+  protected ImmutablePartitionHolder(OvershadowableManager<T> overshadowableManager)
   {
-    super(partitionHolder);
+    super(overshadowableManager);
   }
 
   @Override


@@ -21,7 +21,8 @@ package org.apache.druid.timeline.partition;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.Iterators;
 import it.unimi.dsi.fastutil.objects.AbstractObjectCollection;
 import it.unimi.dsi.fastutil.objects.ObjectCollection;
 import it.unimi.dsi.fastutil.objects.ObjectIterator;
@@ -44,7 +45,9 @@ import javax.annotation.Nullable;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -52,9 +55,6 @@ import java.util.NoSuchElementException;
 import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.function.BiPredicate;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 /**
  * OvershadowableManager manages the state of {@link AtomicUpdateGroup}. See the below {@link State} for details.
@@ -105,22 +105,46 @@ class OvershadowableManager<T extends Overshadowable<T>>
     this.overshadowedGroups = new TreeMap<>();
   }
 
-  OvershadowableManager(OvershadowableManager<T> other)
-  {
-    this.knownPartitionChunks = new HashMap<>(other.knownPartitionChunks);
-    this.standbyGroups = new TreeMap<>(other.standbyGroups);
-    this.visibleGroupPerRange = new TreeMap<>(other.visibleGroupPerRange);
-    this.overshadowedGroups = new TreeMap<>(other.overshadowedGroups);
-  }
+  public static <T extends Overshadowable<T>> OvershadowableManager<T> copyVisible(OvershadowableManager<T> original)
+  {
+    final OvershadowableManager<T> copy = new OvershadowableManager<>();
+    original.visibleGroupPerRange.forEach((partitionRange, versionToGroups) -> {
+      // There should be only one group per partition range
+      final AtomicUpdateGroup<T> group = versionToGroups.values().iterator().next();
+      group.getChunks().forEach(chunk -> copy.knownPartitionChunks.put(chunk.getChunkNumber(), chunk));
+      copy.visibleGroupPerRange.put(
+          partitionRange,
+          new SingleEntryShort2ObjectSortedMap<>(group.getMinorVersion(), AtomicUpdateGroup.copy(group))
+      );
+    });
+    return copy;
+  }
 
-  private OvershadowableManager(List<AtomicUpdateGroup<T>> groups)
-  {
-    this();
-    for (AtomicUpdateGroup<T> entry : groups) {
-      for (PartitionChunk<T> chunk : entry.getChunks()) {
-        addChunk(chunk);
-      }
-    }
-  }
+  public static <T extends Overshadowable<T>> OvershadowableManager<T> deepCopy(OvershadowableManager<T> original)
+  {
+    final OvershadowableManager<T> copy = copyVisible(original);
+    original.overshadowedGroups.forEach((partitionRange, versionToGroups) -> {
+      // There should be only one group per partition range
+      final AtomicUpdateGroup<T> group = versionToGroups.values().iterator().next();
+      group.getChunks().forEach(chunk -> copy.knownPartitionChunks.put(chunk.getChunkNumber(), chunk));
+      copy.overshadowedGroups.put(
+          partitionRange,
+          new SingleEntryShort2ObjectSortedMap<>(group.getMinorVersion(), AtomicUpdateGroup.copy(group))
+      );
+    });
+    original.standbyGroups.forEach((partitionRange, versionToGroups) -> {
+      // There should be only one group per partition range
+      final AtomicUpdateGroup<T> group = versionToGroups.values().iterator().next();
+      group.getChunks().forEach(chunk -> copy.knownPartitionChunks.put(chunk.getChunkNumber(), chunk));
+      copy.standbyGroups.put(
+          partitionRange,
+          new SingleEntryShort2ObjectSortedMap<>(group.getMinorVersion(), AtomicUpdateGroup.copy(group))
+      );
+    });
+    return copy;
+  }
 
   private TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> getStateMap(State state)
@@ -168,7 +192,7 @@ class OvershadowableManager<T extends Overshadowable<T>>
   private void replaceVisibleWith(
       Collection<AtomicUpdateGroup<T>> oldVisibleGroups,
       State newStateOfOldVisibleGroup,
-      List<AtomicUpdateGroup<T>> newVisibleGroups,
+      Collection<AtomicUpdateGroup<T>> newVisibleGroups,
       State oldStateOfNewVisibleGroups
   )
   {
@@ -263,27 +287,17 @@ class OvershadowableManager<T extends Overshadowable<T>>
    * @param minorVersion the minor version to check overshadow relation. The found groups will have lower minor versions
    *                     than this.
    * @param fromState    the state to search for overshadowed groups.
-   *
    * @return a list of found atomicUpdateGroups. It could be empty if no groups are found.
    */
   @VisibleForTesting
   List<AtomicUpdateGroup<T>> findOvershadowedBy(RootPartitionRange rangeOfAug, short minorVersion, State fromState)
   {
     final TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> stateMap = getStateMap(fromState);
-    Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = findLowestOverlappingEntry(
-        rangeOfAug,
-        stateMap,
-        true
-    );
-    if (current == null) {
-      return Collections.emptyList();
-    }
-    // Going through the map to find all entries of the RootPartitionRange contained by the given rangeOfAug.
-    // Note that RootPartitionRange of entries are always consecutive.
     final List<AtomicUpdateGroup<T>> found = new ArrayList<>();
-    while (current != null && rangeOfAug.overlaps(current.getKey())) {
+    final Iterator<Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>>> iterator =
+        entryIteratorGreaterThan(rangeOfAug.startPartitionId, stateMap);
+    while (iterator.hasNext()) {
+      final Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = iterator.next();
       if (rangeOfAug.contains(current.getKey())) {
         // versionToGroup is sorted by minorVersion.
         // versionToGroup.headMap(minorVersion) below returns a map containing all entries of lower minorVersions
@@ -296,8 +310,9 @@ class OvershadowableManager<T extends Overshadowable<T>>
         if (versionToGroup.firstShortKey() < minorVersion) {
           found.addAll(versionToGroup.headMap(minorVersion).values());
         }
+      } else {
+        break;
       }
-      current = stateMap.higherEntry(current.getKey());
     }
     return found;
   }
@@ -318,27 +333,23 @@ class OvershadowableManager<T extends Overshadowable<T>>
    * @param minorVersion the minor version to check overshadow relation. The found groups will have higher minor
    *                     versions than this.
    * @param fromState    the state to search for overshadowed groups.
-   *
    * @return a list of found atomicUpdateGroups. It could be empty if no groups are found.
    */
   @VisibleForTesting
   List<AtomicUpdateGroup<T>> findOvershadows(RootPartitionRange rangeOfAug, short minorVersion, State fromState)
   {
     final TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> stateMap = getStateMap(fromState);
-    Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = findLowestOverlappingEntry(
-        rangeOfAug,
-        stateMap,
-        false
-    );
-    if (current == null) {
-      return Collections.emptyList();
-    }
+    final Iterator<Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>>> iterator =
+        entryIteratorSmallerThan(rangeOfAug.endPartitionId, stateMap);
 
     // Going through the map to find all entries of the RootPartitionRange contains the given rangeOfAug.
     // Note that RootPartitionRange of entries are always consecutive.
     final List<AtomicUpdateGroup<T>> found = new ArrayList<>();
-    while (current != null && current.getKey().overlaps(rangeOfAug)) {
+    while (iterator.hasNext()) {
+      final Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = iterator.next();
+      if (!current.getKey().overlaps(rangeOfAug)) {
+        break;
+      }
       if (current.getKey().contains(rangeOfAug)) {
         // versionToGroup is sorted by minorVersion.
         // versionToGroup.tailMap(minorVersion) below returns a map containing all entries of equal to or higher
@@ -352,63 +363,62 @@ class OvershadowableManager<T extends Overshadowable<T>>
           found.addAll(versionToGroup.tailMap(minorVersion).values());
         }
       }
-      current = stateMap.higherEntry(current.getKey());
     }
     return found;
   }
 
-  /**
-   * Finds the lowest entry overlapping with the given root partition range.
-   * It first searches the entries lower than or equal to the given range.
-   * If there's no such entry lower than the given range, then it searches the entries higher than the given range.
-   *
-   * @return an entry of the lowest key overlapping with the given range. Otherwise null.
-   */
-  @Nullable
-  private Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> findLowestOverlappingEntry(
-      RootPartitionRange rangeOfAug,
-      TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> stateMap,
-      boolean strictSameStartId
-  )
+  boolean isOvershadowedByVisibleGroup(RootPartitionRange partitionRange, short minorVersion)
   {
-    // Searches the entries lower than or equal to the given range.
-    Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = stateMap.floorEntry(rangeOfAug);
-    if (current == null) {
-      // Searches the entries higher than the given range.
-      current = stateMap.higherEntry(rangeOfAug);
-    }
-    if (current == null) {
-      return null;
-    }
-    // floorEntry() can return the greatest key less than rangeOfAug. We need to skip non-overlapping keys.
-    while (current != null && !current.getKey().overlaps(rangeOfAug)) {
-      current = stateMap.higherEntry(current.getKey());
-    }
-    final BiPredicate<RootPartitionRange, RootPartitionRange> predicate;
-    if (strictSameStartId) {
-      predicate = (entryRange, groupRange) -> entryRange.startPartitionId == groupRange.startPartitionId;
-    } else {
-      predicate = RootPartitionRange::overlaps;
-    }
-    // There could be multiple entries of the same startPartitionId but different endPartitionId.
-    // Find the first key of the same startPartitionId which has the lowest endPartitionId.
-    while (current != null) {
-      final Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> lowerEntry = stateMap.lowerEntry(
-          current.getKey()
-      );
-      if (lowerEntry != null && predicate.test(lowerEntry.getKey(), rangeOfAug)) {
-        current = lowerEntry;
-      } else {
-        break;
-      }
-    }
-    return current;
+    final Iterator<Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>>> iterator =
+        entryIteratorSmallerThan(partitionRange.endPartitionId, visibleGroupPerRange);
+    // Going through the map to find all entries of the RootPartitionRange contains the given rangeOfAug.
+    // Note that RootPartitionRange of entries are always consecutive.
+    while (iterator.hasNext()) {
+      final Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> current = iterator.next();
+      if (!current.getKey().overlaps(partitionRange)) {
+        break;
+      }
+      if (current.getKey().contains(partitionRange)) {
+        // versionToGroup is sorted by minorVersion.
+        // versionToGroup.tailMap(minorVersion) below returns a map containing all entries of equal to or higher
+        // minorVersions than the given minorVersion.
+        final Short2ObjectSortedMap<AtomicUpdateGroup<T>> versionToGroup = current.getValue();
+        if (versionToGroup.lastShortKey() > minorVersion) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns an iterator of entries that have a {@link RootPartitionRange} smaller than the given partitionId.
+   * A RootPartitionRange is smaller than a partitionId if {@link RootPartitionRange#startPartitionId} < partitionId.
+   */
+  private Iterator<Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>>> entryIteratorSmallerThan(
+      short partitionId,
+      TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> stateMap
+  )
+  {
+    final RootPartitionRange lowFench = new RootPartitionRange((short) 0, (short) 0);
+    final RootPartitionRange highFence = new RootPartitionRange(partitionId, partitionId);
+    return stateMap.subMap(lowFench, false, highFence, false).descendingMap().entrySet().iterator();
+  }
+
+  /**
+   * Returns an iterator of entries that have a {@link RootPartitionRange} greater than the given partitionId.
+   * A RootPartitionRange is greater than a partitionId if {@link RootPartitionRange#startPartitionId} >= partitionId
+   * and {@link RootPartitionRange#endPartitionId} > partitionId.
+   */
+  private Iterator<Entry<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>>> entryIteratorGreaterThan(
+      short partitionId,
+      TreeMap<RootPartitionRange, Short2ObjectSortedMap<AtomicUpdateGroup<T>>> stateMap
+  )
+  {
+    final RootPartitionRange lowFench = new RootPartitionRange(partitionId, partitionId);
+    final RootPartitionRange highFence = new RootPartitionRange(Short.MAX_VALUE, Short.MAX_VALUE);
+    return stateMap.subMap(lowFench, false, highFence, false).entrySet().iterator();
+  }
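The two iterator helpers above replace the earlier floorEntry()/higherEntry() probing (the commit title's "higherKey()" lookups) with a single NavigableMap view: subMap() with exclusive fences, plus descendingMap() for the smaller-than direction, gives one iterator that callers walk until the ranges stop overlapping. A standalone sketch of the same idea on a plain TreeMap, assuming toy Short keys rather than the RootPartitionRange type:

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

class SubMapIteration
{
  public static void main(String[] args)
  {
    TreeMap<Short, String> map = new TreeMap<>();
    map.put((short) 0, "a");
    map.put((short) 2, "b");
    map.put((short) 5, "c");

    // Keys strictly inside (0, 5), iterated largest-first, mirroring
    // entryIteratorSmallerThan's descendingMap() view. One tree descent
    // builds the view; no repeated higherEntry()/lowerEntry() calls.
    Iterator<Map.Entry<Short, String>> it =
        map.subMap((short) 0, false, (short) 5, false).descendingMap().entrySet().iterator();
    while (it.hasNext()) {
      System.out.println(it.next()); // prints "2=b"
    }
  }
}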
@@ -539,13 +549,14 @@ class OvershadowableManager<T extends Overshadowable<T>>
     if (!isOvershadowingGroupsFull) {
       // Let's check the overshadowed groups can cover the partition range of groupsOvershadowingAug
       // and are fully available.
-      final List<AtomicUpdateGroup<T>> latestFullGroups = groupsOvershadowingAug
-          .stream()
-          .flatMap(eachFullgroup -> findLatestFullyAvailableOvershadowedAtomicUpdateGroups(
+      //noinspection ConstantConditions
+      final List<AtomicUpdateGroup<T>> latestFullGroups = FluentIterable
+          .from(groupsOvershadowingAug)
+          .transformAndConcat(eachFullgroup -> findLatestFullyAvailableOvershadowedAtomicUpdateGroups(
               RootPartitionRange.of(eachFullgroup),
-              eachFullgroup.getMinorVersion()).stream()
-          )
-          .collect(Collectors.toList());
+              eachFullgroup.getMinorVersion()
+          ))
+          .toList();
 
       if (!latestFullGroups.isEmpty()) {
         final boolean isOvershadowedGroupsFull = doGroupsFullyCoverPartitionRange(
@@ -574,7 +585,6 @@ class OvershadowableManager<T extends Overshadowable<T>>
    * @param groups               atomicUpdateGroups sorted by their rootPartitionRange
    * @param startRootPartitionId the start partitionId of the root partition range to check the coverage
    * @param endRootPartitionId   the end partitionId of the root partition range to check the coverage
-   *
    * @return true if the given groups fully cover the given partition range.
    */
   private boolean doGroupsFullyCoverPartitionRange(
@@ -675,11 +685,10 @@ class OvershadowableManager<T extends Overshadowable<T>>
     final AtomicUpdateGroup<T> newAtomicUpdateGroup = new AtomicUpdateGroup<>(chunk);
 
     // Decide the initial state of the new atomicUpdateGroup
-    final boolean overshadowed = visibleGroupPerRange
-        .values()
-        .stream()
-        .flatMap(map -> map.values().stream())
-        .anyMatch(group -> group.overshadows(newAtomicUpdateGroup));
+    final boolean overshadowed = isOvershadowedByVisibleGroup(
+        RootPartitionRange.of(newAtomicUpdateGroup),
+        newAtomicUpdateGroup.getMinorVersion()
+    );
 
     if (overshadowed) {
       addAtomicUpdateGroupWithState(newAtomicUpdateGroup, State.OVERSHADOWED, true);
@@ -724,20 +733,18 @@ class OvershadowableManager<T extends Overshadowable<T>>
     if (!latestFullAugs.isEmpty()) {
       // The current visible atomicUpdateGroup becomes standby
       // and the fully available overshadowed atomicUpdateGroups become visible
-      final Set<AtomicUpdateGroup<T>> overshadowsLatestFullAugsInVisible = latestFullAugs
-          .stream()
-          .flatMap(group -> findOvershadows(group, State.VISIBLE).stream())
-          .collect(Collectors.toSet());
+      final Set<AtomicUpdateGroup<T>> overshadowsLatestFullAugsInVisible = FluentIterable
+          .from(latestFullAugs)
+          .transformAndConcat(group -> findOvershadows(group, State.VISIBLE))
+          .toSet();
       replaceVisibleWith(
           overshadowsLatestFullAugsInVisible,
           State.STANDBY,
           latestFullAugs,
           State.OVERSHADOWED
       );
-      latestFullAugs
-          .stream()
-          .flatMap(group -> findOvershadows(group, State.OVERSHADOWED).stream())
-          .collect(Collectors.toSet())
+      FluentIterable.from(latestFullAugs)
+          .transformAndConcat(group -> findOvershadows(group, State.OVERSHADOWED))
           .forEach(group -> transitAtomicUpdateGroupState(group, State.OVERSHADOWED, State.STANDBY));
     } else {
       // Find the latest non-fully available atomicUpdateGroups
@@ -745,17 +752,16 @@ class OvershadowableManager<T extends Overshadowable<T>>
           findOvershadows(rangeOfAug, minorVersion, State.STANDBY)
       );
       if (!latestStandby.isEmpty()) {
-        final List<AtomicUpdateGroup<T>> overshadowedByLatestStandby = latestStandby
-            .stream()
-            .flatMap(group -> findOvershadowedBy(group, State.VISIBLE).stream())
-            .collect(Collectors.toList());
+        final List<AtomicUpdateGroup<T>> overshadowedByLatestStandby = FluentIterable
+            .from(latestStandby)
+            .transformAndConcat(group -> findOvershadowedBy(group, State.VISIBLE))
+            .toList();
         replaceVisibleWith(overshadowedByLatestStandby, State.OVERSHADOWED, latestStandby, State.STANDBY);
 
         // All standby groups overshadowed by the new visible group should be moved to overshadowed
-        latestStandby
-            .stream()
-            .flatMap(group -> findOvershadowedBy(group, State.STANDBY).stream())
-            .collect(Collectors.toSet())
+        FluentIterable
+            .from(latestStandby)
+            .transformAndConcat(group -> findOvershadowedBy(group, State.STANDBY))
             .forEach(aug -> transitAtomicUpdateGroupState(aug, State.STANDBY, State.OVERSHADOWED));
       } else if (augOfRemovedChunk.isEmpty()) {
         // Visible is empty. Move the latest overshadowed to visible.
@@ -785,15 +791,27 @@ class OvershadowableManager<T extends Overshadowable<T>>
       return Collections.emptyList();
     }
 
-    final OvershadowableManager<T> manager = new OvershadowableManager<>(groups);
-    if (!manager.standbyGroups.isEmpty()) {
-      throw new ISE("This method should be called only when there is no fully available group in the given state.");
-    }
+    final TreeMap<RootPartitionRange, AtomicUpdateGroup<T>> rangeToGroup = new TreeMap<>();
+    for (AtomicUpdateGroup<T> group : groups) {
+      rangeToGroup.put(RootPartitionRange.of(group), group);
+    }
     final List<AtomicUpdateGroup<T>> visibles = new ArrayList<>();
-    for (Short2ObjectSortedMap<AtomicUpdateGroup<T>> map : manager.visibleGroupPerRange.values()) {
-      visibles.addAll(map.values());
-    }
+    // rangeToGroup is sorted by RootPartitionRange, which means the groups of the wider range will appear later
+    // in the rangeToGroup map. Since the wider groups have higher minor versions than narrower groups,
+    // we iterate the rangeToGroup from the last entry in descending order.
+    Entry<RootPartitionRange, AtomicUpdateGroup<T>> currEntry = rangeToGroup.lastEntry();
+    while (currEntry != null) {
+      final Entry<RootPartitionRange, AtomicUpdateGroup<T>> lowerEntry = rangeToGroup.lowerEntry(currEntry.getKey());
+      if (lowerEntry != null) {
+        if (lowerEntry.getKey().endPartitionId != currEntry.getKey().startPartitionId) {
+          return Collections.emptyList();
+        }
+      }
+      visibles.add(currEntry.getValue());
+      currEntry = lowerEntry;
+    }
+    // visibles should be sorted.
+    visibles.sort(Comparator.comparing(RootPartitionRange::of));
     return visibles;
   }
@@ -807,27 +825,38 @@ class OvershadowableManager<T extends Overshadowable<T>>
         minorVersion,
         State.OVERSHADOWED
     );
-    if (overshadowedGroups.isEmpty()) {
+    // Filter out non-fully available groups.
+    final TreeMap<RootPartitionRange, AtomicUpdateGroup<T>> fullGroups = new TreeMap<>();
+    for (AtomicUpdateGroup<T> group : FluentIterable.from(overshadowedGroups).filter(AtomicUpdateGroup::isFull)) {
+      fullGroups.put(RootPartitionRange.of(group), group);
+    }
+    if (fullGroups.isEmpty()) {
+      return Collections.emptyList();
+    }
+    if (fullGroups.firstKey().startPartitionId != rangeOfAug.startPartitionId
+        || fullGroups.lastKey().endPartitionId != rangeOfAug.endPartitionId) {
       return Collections.emptyList();
     }
-    final OvershadowableManager<T> manager = new OvershadowableManager<>(overshadowedGroups);
+
+    // Find latest fully available groups.
     final List<AtomicUpdateGroup<T>> visibles = new ArrayList<>();
-    for (Short2ObjectSortedMap<AtomicUpdateGroup<T>> map : manager.visibleGroupPerRange.values()) {
-      for (AtomicUpdateGroup<T> atomicUpdateGroup : map.values()) {
-        if (!atomicUpdateGroup.isFull()) {
-          return Collections.emptyList();
-        }
-        visibles.add(atomicUpdateGroup);
-      }
-    }
-    final RootPartitionRange foundRange = RootPartitionRange.of(
-        visibles.get(0).getStartRootPartitionId(),
-        visibles.get(visibles.size() - 1).getEndRootPartitionId()
-    );
-    if (!rangeOfAug.equals(foundRange)) {
-      return Collections.emptyList();
-    }
+    // fullGroups is sorted by RootPartitionRange, which means the groups of the wider range will appear later
+    // in the fullGroups map. Since the wider groups have higher minor versions than narrower groups,
+    // we iterate the fullGroups from the last entry in descending order.
+    Entry<RootPartitionRange, AtomicUpdateGroup<T>> currEntry = fullGroups.lastEntry();
+    while (currEntry != null) {
+      final Entry<RootPartitionRange, AtomicUpdateGroup<T>> lowerEntry = fullGroups.lowerEntry(currEntry.getKey());
+      if (lowerEntry != null) {
+        if (lowerEntry.getKey().endPartitionId != currEntry.getKey().startPartitionId) {
+          return Collections.emptyList();
+        }
+      }
+      visibles.add(currEntry.getValue());
+      currEntry = lowerEntry;
+    }
+    // visibles should be sorted.
+    visibles.sort(Comparator.comparing(RootPartitionRange::of));
     return visibles;
   }
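Both methods above end with the same walk: index the candidate groups in a TreeMap by root partition range, then step from the last (highest-start) entry downward, requiring each lower neighbor to end exactly where the current entry starts, so the chain covers one contiguous partition range. A simplified sketch of that contiguity walk, assuming ranges keyed by start id alone (the real code keys by the full RootPartitionRange):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

class ContiguityWalk
{
  public static void main(String[] args)
  {
    // start -> end; [0, 2) and [2, 4) chain into one contiguous [0, 4).
    TreeMap<Integer, Integer> startToEnd = new TreeMap<>();
    startToEnd.put(0, 2);
    startToEnd.put(2, 4);

    List<Map.Entry<Integer, Integer>> chain = new ArrayList<>();
    Map.Entry<Integer, Integer> curr = startToEnd.lastEntry();
    while (curr != null) {
      Map.Entry<Integer, Integer> lower = startToEnd.lowerEntry(curr.getKey());
      // A gap between neighbors means the candidates cannot cover the range.
      if (lower != null && !lower.getValue().equals(curr.getKey())) {
        chain.clear();
        break;
      }
      chain.add(curr);
      curr = lower;
    }
    System.out.println(chain); // [2=4, 0=2]: contiguous cover of [0, 4)
  }
}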
@@ -896,10 +925,14 @@ class OvershadowableManager<T extends Overshadowable<T>>
   public boolean isComplete()
   {
-    return visibleGroupPerRange
-        .values()
-        .stream()
-        .allMatch(map -> Iterables.getOnlyElement(map.values()).isFull());
+    return Iterators.all(
+        visibleGroupPerRange.values().iterator(),
+        map -> {
+          SingleEntryShort2ObjectSortedMap<AtomicUpdateGroup<T>> singleMap =
+              (SingleEntryShort2ObjectSortedMap<AtomicUpdateGroup<T>>) map;
+          //noinspection ConstantConditions
+          return singleMap.val.isFull();
+        });
   }
 
   @Nullable
@@ -922,13 +955,18 @@ class OvershadowableManager<T extends Overshadowable<T>>
     }
   }
 
-  Stream<PartitionChunk<T>> createVisibleChunksStream()
+  Iterator<PartitionChunk<T>> visibleChunksIterator()
   {
-    return visibleGroupPerRange
-        .values()
-        .stream()
-        .flatMap((Short2ObjectSortedMap<AtomicUpdateGroup<T>> map) -> map.values().stream())
-        .flatMap((AtomicUpdateGroup<T> aug) -> aug.getChunks().stream());
+    final FluentIterable<Short2ObjectSortedMap<AtomicUpdateGroup<T>>> versionToGroupIterable = FluentIterable.from(
+        visibleGroupPerRange.values()
+    );
+    return versionToGroupIterable
+        .transformAndConcat(map -> {
+          SingleEntryShort2ObjectSortedMap<AtomicUpdateGroup<T>> singleMap =
+              (SingleEntryShort2ObjectSortedMap<AtomicUpdateGroup<T>>) map;
+          //noinspection ConstantConditions
+          return singleMap.val.getChunks();
+        }).iterator();
   }
 
   List<PartitionChunk<T>> getOvershadowedChunks()
@@ -1086,6 +1124,12 @@ class OvershadowableManager<T extends Overshadowable<T>>
       val = null;
     }
 
+    private SingleEntryShort2ObjectSortedMap(short key, V val)
+    {
+      this.key = key;
+      this.val = val;
+    }
+
     @Override
     public Short2ObjectSortedMap<V> subMap(short fromKey, short toKey)
     {


@@ -26,8 +26,6 @@ import javax.annotation.Nullable;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.Spliterator;
-import java.util.stream.Stream;
 
 /**
  * An object that clumps together multiple other objects which each represent a shard of some space.
@@ -36,6 +34,18 @@ public class PartitionHolder<T extends Overshadowable<T>> implements Iterable<PartitionChunk<T>>
 {
   private final OvershadowableManager<T> overshadowableManager;
 
+  public static <T extends Overshadowable<T>> PartitionHolder<T> copyWithOnlyVisibleChunks(
+      PartitionHolder<T> partitionHolder
+  )
+  {
+    return new PartitionHolder<>(OvershadowableManager.copyVisible(partitionHolder.overshadowableManager));
+  }
+
+  public static <T extends Overshadowable<T>> PartitionHolder<T> deepCopy(PartitionHolder<T> partitionHolder)
+  {
+    return new PartitionHolder<>(OvershadowableManager.deepCopy(partitionHolder.overshadowableManager));
+  }
+
   public PartitionHolder(PartitionChunk<T> initialChunk)
   {
     this.overshadowableManager = new OvershadowableManager<>();
@@ -50,9 +60,14 @@ public class PartitionHolder<T extends Overshadowable<T>> implements Iterable<PartitionChunk<T>>
     }
   }
 
-  public PartitionHolder(PartitionHolder<T> partitionHolder)
+  protected PartitionHolder(OvershadowableManager<T> overshadowableManager)
   {
-    this.overshadowableManager = new OvershadowableManager<>(partitionHolder.overshadowableManager);
+    this.overshadowableManager = overshadowableManager;
+  }
+
+  public ImmutablePartitionHolder<T> asImmutable()
+  {
+    return new ImmutablePartitionHolder<>(OvershadowableManager.copyVisible(overshadowableManager));
   }
 
   public boolean add(PartitionChunk<T> chunk)
@@ -112,18 +127,7 @@ public class PartitionHolder<T extends Overshadowable<T>> implements Iterable<PartitionChunk<T>>
   @Override
   public Iterator<PartitionChunk<T>> iterator()
   {
-    return stream().iterator();
+    return overshadowableManager.visibleChunksIterator();
   }
 
-  @Override
-  public Spliterator<PartitionChunk<T>> spliterator()
-  {
-    return stream().spliterator();
-  }
-
-  public Stream<PartitionChunk<T>> stream()
-  {
-    return overshadowableManager.createVisibleChunksStream();
-  }
-
   public List<PartitionChunk<T>> getOvershadowed()


@@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.apache.druid.java.util.common.DateTimes;
 import org.apache.druid.java.util.common.Intervals;
-import org.apache.druid.timeline.partition.ImmutablePartitionHolder;
 import org.apache.druid.timeline.partition.IntegerPartitionChunk;
 import org.apache.druid.timeline.partition.OvershadowableInteger;
 import org.apache.druid.timeline.partition.PartitionHolder;
@@ -225,22 +224,22 @@ public class VersionedIntervalTimelineSpecificDataTest extends VersionedIntervalTimelineTestBase
   public void testFindEntry()
   {
     Assert.assertEquals(
-        new ImmutablePartitionHolder<>(new PartitionHolder<>(makeSingle("1", 1))),
+        new PartitionHolder<>(makeSingle("1", 1)).asImmutable(),
         timeline.findEntry(Intervals.of("2011-10-01/2011-10-02"), "1")
     );
 
     Assert.assertEquals(
-        new ImmutablePartitionHolder<>(new PartitionHolder<>(makeSingle("1", 1))),
+        new PartitionHolder<>(makeSingle("1", 1)).asImmutable(),
         timeline.findEntry(Intervals.of("2011-10-01/2011-10-01T10"), "1")
     );
 
     Assert.assertEquals(
-        new ImmutablePartitionHolder<>(new PartitionHolder<>(makeSingle("1", 1))),
+        new PartitionHolder<>(makeSingle("1", 1)).asImmutable(),
         timeline.findEntry(Intervals.of("2011-10-01T02/2011-10-02"), "1")
     );
 
     Assert.assertEquals(
-        new ImmutablePartitionHolder<>(new PartitionHolder<>(makeSingle("1", 1))),
+        new PartitionHolder<>(makeSingle("1", 1)).asImmutable(),
         timeline.findEntry(Intervals.of("2011-10-01T04/2011-10-01T17"), "1")
     );


@@ -25,7 +25,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import org.apache.druid.java.util.common.DateTimes;
 import org.apache.druid.java.util.common.Intervals;
-import org.apache.druid.timeline.partition.ImmutablePartitionHolder;
 import org.apache.druid.timeline.partition.IntegerPartitionChunk;
 import org.apache.druid.timeline.partition.OvershadowableInteger;
 import org.apache.druid.timeline.partition.PartitionHolder;
@@ -58,7 +57,7 @@ public class VersionedIntervalTimelineTest extends VersionedIntervalTimelineTestBase
     add("2011-01-02/2011-01-05", "2", 1);
 
     Assert.assertEquals(
-        new ImmutablePartitionHolder<>(new PartitionHolder<>(makeSingle("1", 1))),
+        new PartitionHolder<>(makeSingle("1", 1)).asImmutable(),
         timeline.findEntry(Intervals.of("2011-01-02T02/2011-01-04"), "1")
     );
   }
@@ -1407,9 +1406,7 @@ public class VersionedIntervalTimelineTest extends VersionedIntervalTimelineTestBase
         new PartitionHolder<>(
             ImmutableList.of(
                 makeNumbered("1", 0, 0),
-                makeNumbered("1", 1, 0),
-                makeNumberedOverwriting("1", 0, 1, 0, 2, 1, 3),
-                makeNumberedOverwriting("1", 1, 1, 0, 2, 1, 3)
+                makeNumbered("1", 1, 0)
             )
         )
     )
@@ -1500,4 +1497,79 @@ public class VersionedIntervalTimelineTest extends VersionedIntervalTimelineTestBase
     Assert.assertEquals(2, Lists.newArrayList(overshadowableIntegers.iterator()).size());
   }
 
+  @Test
+  public void testFindNonOvershadowedObjectsInIntervalWithOnlyCompletePartitionsReturningValidResult()
+  {
+    // 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 0, 0));
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 1, 0));
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 2, 0));
+
+    // 2019-01-02/2019-01-03
+    add("2019-01-02/2019-01-03", "0", makeNumbered("0", 0, 0));
+    add("2019-01-02/2019-01-03", "0", makeNumbered("0", 1, 0));
+
+    // Incomplete partitions
+    add("2019-01-03/2019-01-04", "0", makeNumbered("2", 0, 3, 0));
+    add("2019-01-03/2019-01-04", "0", makeNumbered("2", 1, 3, 0));
+
+    // Overwrite 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "1", makeNumbered("1", 0, 0));
+    add("2019-01-01/2019-01-02", "1", makeNumbered("1", 1, 0));
+
+    // Overwrite 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 0, 1, 0, 2, 1, 3));
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 1, 1, 0, 2, 1, 3));
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 2, 1, 0, 2, 1, 3));
+
+    Assert.assertEquals(
+        ImmutableSet.of(
+            makeNumberedOverwriting("1", 0, 1, 0, 2, 1, 3).getObject(),
+            makeNumberedOverwriting("1", 1, 1, 0, 2, 1, 3).getObject(),
+            makeNumberedOverwriting("1", 2, 1, 0, 2, 1, 3).getObject(),
+            makeNumbered("0", 0, 0).getObject(),
+            makeNumbered("0", 1, 0).getObject()
+        ),
+        timeline.findNonOvershadowedObjectsInInterval(Intervals.of("2019-01-01/2019-01-04"), Partitions.ONLY_COMPLETE)
+    );
+  }
+
+  @Test
+  public void testFindNonOvershadowedObjectsInIntervalWithIncompleteOkReturningValidResult()
+  {
+    // 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 0, 0));
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 1, 0));
+    add("2019-01-01/2019-01-02", "0", makeNumbered("0", 2, 0));
+
+    // 2019-01-02/2019-01-03
+    add("2019-01-02/2019-01-03", "0", makeNumbered("0", 0, 0));
+    add("2019-01-02/2019-01-03", "0", makeNumbered("0", 1, 0));
+
+    // Incomplete partitions
+    add("2019-01-03/2019-01-04", "0", makeNumbered("2", 0, 3, 0));
+    add("2019-01-03/2019-01-04", "0", makeNumbered("2", 1, 3, 0));
+
+    // Overwrite 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "1", makeNumbered("1", 0, 0));
+    add("2019-01-01/2019-01-02", "1", makeNumbered("1", 1, 0));
+
+    // Overwrite 2019-01-01/2019-01-02
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 0, 1, 0, 2, 1, 3));
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 1, 1, 0, 2, 1, 3));
+    add("2019-01-01/2019-01-02", "1", makeNumberedOverwriting("1", 2, 1, 0, 2, 1, 3));
+
+    Assert.assertEquals(
+        ImmutableSet.of(
+            makeNumberedOverwriting("1", 0, 1, 0, 2, 1, 3).getObject(),
+            makeNumberedOverwriting("1", 1, 1, 0, 2, 1, 3).getObject(),
+            makeNumberedOverwriting("1", 2, 1, 0, 2, 1, 3).getObject(),
+            makeNumbered("0", 0, 0).getObject(),
+            makeNumbered("0", 1, 0).getObject(),
+            makeNumbered("2", 0, 3, 0).getObject(),
+            makeNumbered("2", 1, 3, 0).getObject()
+        ),
+        timeline.findNonOvershadowedObjectsInInterval(Intervals.of("2019-01-01/2019-01-04"), Partitions.INCOMPLETE_OK)
+    );
+  }
 }

View File

@@ -19,6 +19,7 @@
 package org.apache.druid.timeline;
 
+import com.google.common.collect.FluentIterable;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Ordering;
@@ -107,7 +108,10 @@ public class VersionedIntervalTimelineTestBase
   void checkRemove()
   {
     for (TimelineObjectHolder<String, OvershadowableInteger> holder : timeline.findFullyOvershadowed()) {
-      for (PartitionChunk<OvershadowableInteger> chunk : holder.getObject()) {
+      // Copy chunks to avoid the ConcurrentModificationException.
+      // Note that timeline.remove() modifies the PartitionHolder.
+      List<PartitionChunk<OvershadowableInteger>> chunks = FluentIterable.from(holder.getObject()).toList();
+      for (PartitionChunk<OvershadowableInteger> chunk : chunks) {
         timeline.remove(holder.getInterval(), holder.getVersion(), chunk);
       }
     }
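The copy matters because PartitionHolder.iterator() is now backed by OvershadowableManager's live maps, and timeline.remove() mutates them mid-loop (the concurrency bug called out in the commit message). A generic sketch of the hazard and the fix, using a plain ArrayList in place of the holder:

import java.util.ArrayList;
import java.util.List;

class CopyBeforeRemove
{
  public static void main(String[] args)
  {
    List<Integer> live = new ArrayList<>(List.of(1, 2, 3));

    // Iterating a snapshot lets us safely mutate the live collection;
    // iterating 'live' itself here would throw ConcurrentModificationException.
    for (Integer v : new ArrayList<>(live)) {
      live.remove(v);
    }
    System.out.println(live); // []
  }
}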
@@ -154,26 +158,36 @@ public class VersionedIntervalTimelineTestBase
     );
   }
 
-  PartitionChunk<OvershadowableInteger> makeSingle(String majorVersion, int value)
+  public static PartitionChunk<OvershadowableInteger> makeSingle(String majorVersion, int value)
   {
     return makeSingle(majorVersion, 0, value);
   }
 
-  private PartitionChunk<OvershadowableInteger> makeSingle(String majorVersion, int partitionNum, int val)
+  public static PartitionChunk<OvershadowableInteger> makeSingle(String majorVersion, int partitionNum, int val)
   {
     return new SingleElementPartitionChunk<>(new OvershadowableInteger(majorVersion, partitionNum, val));
   }
 
-  PartitionChunk<OvershadowableInteger> makeNumbered(String majorVersion, int partitionNum, int val)
+  public static PartitionChunk<OvershadowableInteger> makeNumbered(String majorVersion, int partitionNum, int val)
+  {
+    return makeNumbered(majorVersion, partitionNum, 0, val);
+  }
+
+  public static PartitionChunk<OvershadowableInteger> makeNumbered(
+      String majorVersion,
+      int partitionNum,
+      int chunks,
+      int val
+  )
   {
     return new NumberedPartitionChunk<>(
         partitionNum,
-        0,
+        chunks,
         new OvershadowableInteger(majorVersion, partitionNum, val)
     );
   }
 
-  PartitionChunk<OvershadowableInteger> makeNumberedOverwriting(
+  public static PartitionChunk<OvershadowableInteger> makeNumberedOverwriting(
       String majorVersion,
       int partitionNumOrdinal,
      int val,


@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.timeline.partition;
+
+import nl.jqno.equalsverifier.EqualsVerifier;
+import org.apache.druid.timeline.VersionedIntervalTimelineTestBase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.stream.IntStream;
+
+public class AtomicUpdateGroupTest
+{
+  @Test
+  public void testCopy()
+  {
+    AtomicUpdateGroup<OvershadowableInteger> original = new AtomicUpdateGroup<>(
+        VersionedIntervalTimelineTestBase.makeNumberedOverwriting(
+            "0",
+            0,
+            0,
+            0,
+            10,
+            0,
+            10
+        )
+    );
+    IntStream.range(1, 10).forEach(
+        i -> original.add(
+            VersionedIntervalTimelineTestBase.makeNumberedOverwriting(
+                "0",
+                i,
+                0,
+                0,
+                10,
+                0,
+                10
+            )
+        )
+    );
+    Assert.assertEquals(AtomicUpdateGroup.copy(original), original);
+  }
+
+  @Test
+  public void testEqualAndHashCodeContract()
+  {
+    EqualsVerifier.forClass(AtomicUpdateGroup.class).usingGetClass().verify();
+  }
+}

View File

@@ -20,7 +20,9 @@
 package org.apache.druid.timeline.partition;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import nl.jqno.equalsverifier.EqualsVerifier;
 import org.apache.druid.timeline.partition.OvershadowableManager.RootPartitionRange;
 import org.apache.druid.timeline.partition.OvershadowableManager.State;
 import org.junit.Assert;
@@ -63,6 +65,63 @@ public class OvershadowableManagerTest
     expectedStandbyChunks = new ArrayList<>();
   }
 
+  @Test
+  public void testCopyVisible()
+  {
+    // chunks of partition id 0 and 1
+    manager.addChunk(newRootChunk());
+    manager.addChunk(newRootChunk());
+    // chunks to overshadow the partition id range [0, 2)
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    // chunks of partition id 3 and 4
+    manager.addChunk(newRootChunk());
+    manager.addChunk(newRootChunk());
+    // standby chunk
+    manager.addChunk(newNonRootChunk(2, 4, 1, 3));
+
+    OvershadowableManager<OvershadowableInteger> copy = OvershadowableManager.copyVisible(manager);
+    Assert.assertTrue(copy.getOvershadowedChunks().isEmpty());
+    Assert.assertTrue(copy.getStandbyChunks().isEmpty());
+    Assert.assertEquals(
+        Lists.newArrayList(manager.visibleChunksIterator()),
+        Lists.newArrayList(copy.visibleChunksIterator())
+    );
+  }
+
+  @Test
+  public void testDeepCopy()
+  {
+    // chunks of partition id 0 and 1
+    manager.addChunk(newRootChunk());
+    manager.addChunk(newRootChunk());
+    // chunks to overshadow the partition id range [0, 2)
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    manager.addChunk(newNonRootChunk(0, 2, 1, 3));
+    // chunks of partition id 3 and 4
+    manager.addChunk(newRootChunk());
+    manager.addChunk(newRootChunk());
+    // standby chunk
+    manager.addChunk(newNonRootChunk(2, 4, 1, 3));
+
+    OvershadowableManager<OvershadowableInteger> copy = OvershadowableManager.deepCopy(manager);
+    Assert.assertEquals(manager, copy);
+  }
+
+  @Test
+  public void testEqualAndHashCodeContract()
+  {
+    EqualsVerifier.forClass(OvershadowableManager.class).usingGetClass().verify();
+  }
+
   @Test
   public void testFindOvershadowedBy()
   {
@@ -981,7 +1040,7 @@ public class OvershadowableManagerTest
     Assert.assertEquals(
         "Mismatched visible chunks",
         new HashSet<>(expectedVisibleChunks),
-        Sets.newHashSet(manager.createVisibleChunksStream().iterator())
+        Sets.newHashSet(manager.visibleChunksIterator())
     );
     Assert.assertEquals(
         "Mismatched overshadowed chunks",


@@ -23,6 +23,7 @@ import com.fasterxml.jackson.annotation.JacksonInject;
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.google.common.base.Optional;
+import com.google.common.collect.FluentIterable;
 import org.apache.commons.io.FileUtils;
 import org.apache.druid.client.indexing.IndexingServiceClient;
 import org.apache.druid.data.input.InputRow;
@@ -65,6 +66,7 @@ import org.apache.druid.segment.realtime.appenderator.BatchAppenderatorDriver;
 import org.apache.druid.segment.realtime.appenderator.SegmentAllocator;
 import org.apache.druid.segment.realtime.appenderator.SegmentsAndCommitMetadata;
 import org.apache.druid.timeline.DataSegment;
+import org.apache.druid.timeline.TimelineObjectHolder;
 import org.apache.druid.timeline.VersionedIntervalTimeline;
 import org.apache.druid.timeline.partition.PartitionChunk;
 import org.joda.time.Interval;
@@ -223,11 +225,10 @@ public class SinglePhaseSubTask extends AbstractBatchIndexTask
       final Set<DataSegment> allSegments = new HashSet<>(getTaskLockHelper().getLockedExistingSegments());
       allSegments.addAll(pushedSegments);
       final VersionedIntervalTimeline<String, DataSegment> timeline = VersionedIntervalTimeline.forSegments(allSegments);
-      final Set<DataSegment> oldSegments = timeline.findFullyOvershadowed()
-          .stream()
-          .flatMap(holder -> holder.getObject().stream())
-          .map(PartitionChunk::getObject)
-          .collect(Collectors.toSet());
+      final Set<DataSegment> oldSegments = FluentIterable.from(timeline.findFullyOvershadowed())
+          .transformAndConcat(TimelineObjectHolder::getObject)
+          .transform(PartitionChunk::getObject)
+          .toSet();
       taskClient.report(supervisorTaskId, new PushedSegmentsReport(getId(), oldSegments, pushedSegments));
 
       return TaskStatus.success(getId());


@ -25,10 +25,10 @@ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Iterators; import com.google.common.collect.Iterators;
import org.apache.druid.client.coordinator.CoordinatorClient; import org.apache.druid.client.coordinator.CoordinatorClient;
import org.apache.druid.data.input.AbstractInputSource; import org.apache.druid.data.input.AbstractInputSource;
import org.apache.druid.data.input.InputEntity;
import org.apache.druid.data.input.InputFileAttribute; import org.apache.druid.data.input.InputFileAttribute;
import org.apache.druid.data.input.InputFormat; import org.apache.druid.data.input.InputFormat;
import org.apache.druid.data.input.InputRowSchema; import org.apache.druid.data.input.InputRowSchema;
@ -171,15 +171,16 @@ public class DruidInputSource extends AbstractInputSource implements SplittableI
final SegmentLoader segmentLoader = segmentLoaderFactory.manufacturate(temporaryDirectory); final SegmentLoader segmentLoader = segmentLoaderFactory.manufacturate(temporaryDirectory);
final List<TimelineObjectHolder<String, DataSegment>> timeline = createTimeline(); final List<TimelineObjectHolder<String, DataSegment>> timeline = createTimeline();
final Stream<InputEntity> entityStream = timeline final Iterator<DruidSegmentInputEntity> entityIterator = FluentIterable
.stream() .from(timeline)
.flatMap(holder -> { .transformAndConcat(holder -> {
//noinspection ConstantConditions
final PartitionHolder<DataSegment> partitionHolder = holder.getObject(); final PartitionHolder<DataSegment> partitionHolder = holder.getObject();
return partitionHolder //noinspection ConstantConditions
.stream() return FluentIterable
.map(chunk -> new DruidSegmentInputEntity(segmentLoader, chunk.getObject(), holder.getInterval())); .from(partitionHolder)
}); .transform(chunk -> new DruidSegmentInputEntity(segmentLoader, chunk.getObject(), holder.getInterval()));
}).iterator();
final List<String> effectiveDimensions; final List<String> effectiveDimensions;
if (dimensions == null) { if (dimensions == null) {
effectiveDimensions = ReingestionTimelineUtils.getUniqueDimensions( effectiveDimensions = ReingestionTimelineUtils.getUniqueDimensions(
@ -209,7 +210,7 @@ public class DruidInputSource extends AbstractInputSource implements SplittableI
return new InputEntityIteratingReader( return new InputEntityIteratingReader(
inputRowSchema, inputRowSchema,
inputFormat, inputFormat,
entityStream.iterator(), entityIterator,
temporaryDirectory temporaryDirectory
); );
} }
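Both the old Stream version and the new FluentIterable version build the segment input entities lazily: transformAndConcat only invokes its function as the resulting iterator is consumed. A small self-contained sketch of that behavior, on toy data rather than Druid types:

import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import java.util.Iterator;
import java.util.List;

public class LazyConcatSketch
{
  public static void main(String[] args)
  {
    List<List<String>> nested = ImmutableList.of(
        ImmutableList.of("a", "b"),
        ImmutableList.of("c")
    );

    // Nothing is flattened yet; the function runs only as elements are pulled.
    Iterator<String> iterator = FluentIterable
        .from(nested)
        .transformAndConcat(inner -> {
          System.out.println("visiting " + inner);
          return inner;
        })
        .iterator();

    System.out.println(iterator.next()); // prints "visiting [a, b]" and then "a"
  }
}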

View File

@ -168,8 +168,9 @@ public class SinglePhaseParallelIndexingTest extends AbstractParallelIndexSuperv
runTestTask(inputInterval, Granularities.DAY); runTestTask(inputInterval, Granularities.DAY);
final Interval interval = inputInterval == null ? Intervals.ETERNITY : inputInterval; final Interval interval = inputInterval == null ? Intervals.ETERNITY : inputInterval;
final Collection<DataSegment> allSegments = final Collection<DataSegment> allSegments = new HashSet<>(
getStorageCoordinator().retrieveUsedSegmentsForInterval("dataSource", interval, Segments.ONLY_VISIBLE); getStorageCoordinator().retrieveUsedSegmentsForInterval("dataSource", interval, Segments.ONLY_VISIBLE)
);
// Reingest the same data. Each segment should get replaced by a segment with a newer version. // Reingest the same data. Each segment should get replaced by a segment with a newer version.
runTestTask(inputInterval, secondSegmentGranularity); runTestTask(inputInterval, secondSegmentGranularity);
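The test now copies the returned collection into a HashSet, presumably so the later comparison is order-insensitive: set equality depends only on membership, while list equality also depends on order. A trivial illustration:

import com.google.common.collect.ImmutableList;
import java.util.HashSet;

public class SetEqualitySketch
{
  public static void main(String[] args)
  {
    // List equality is order-sensitive; Set equality is not.
    System.out.println(ImmutableList.of("a", "b").equals(ImmutableList.of("b", "a"))); // false
    System.out.println(new HashSet<>(ImmutableList.of("a", "b"))
        .equals(new HashSet<>(ImmutableList.of("b", "a")))); // true
  }
}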

View File

@ -19,6 +19,7 @@
package org.apache.druid.tests.indexer; package org.apache.druid.tests.indexer;
import com.google.common.collect.FluentIterable;
import com.google.inject.Inject; import com.google.inject.Inject;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
import org.apache.druid.indexing.common.task.batch.parallel.PartialDimensionDistributionTask; import org.apache.druid.indexing.common.task.batch.parallel.PartialDimensionDistributionTask;
@ -250,11 +251,13 @@ public abstract class AbstractITBatchIndexTest extends AbstractIndexerTest
); );
final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(Intervals.ETERNITY); final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(Intervals.ETERNITY);
return holders return FluentIterable
.stream() .from(holders)
.flatMap(holder -> holder.getObject().stream()) .transformAndConcat(TimelineObjectHolder::getObject)
.anyMatch(chunk -> oldVersions.stream() .anyMatch(
.anyMatch(oldSegment -> chunk.getObject().overshadows(oldSegment))); chunk -> FluentIterable.from(oldVersions)
.anyMatch(oldSegment -> chunk.getObject().overshadows(oldSegment))
);
}, },
"See a new version" "See a new version"
); );
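FluentIterable.anyMatch short-circuits on the first hit, just as Stream.anyMatch does, so the nested containment test above keeps the same semantics. A toy sketch of the shape, with equality standing in for the overshadows check:

import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import java.util.List;

public class NestedAnyMatchSketch
{
  public static void main(String[] args)
  {
    List<Integer> newValues = ImmutableList.of(3, 7, 11);
    List<Integer> oldValues = ImmutableList.of(5, 7);

    // True if any new value "overshadows" (here: simply equals) any old value.
    boolean anyOvershadows = FluentIterable
        .from(newValues)
        .anyMatch(n -> FluentIterable.from(oldValues).anyMatch(n::equals));
    System.out.println(anyOvershadows); // true; evaluation stops once 7 matches
  }
}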

View File

@ -169,5 +169,4 @@ public class DataSourcesSnapshot
} }
return overshadowedSegments; return overshadowedSegments;
} }
} }

View File

@ -22,6 +22,7 @@ package org.apache.druid.metadata;
import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables; import com.google.common.collect.Iterables;
@ -71,13 +72,11 @@ import java.sql.ResultSet;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.StreamSupport;
/** /**
*/ */
@ -811,9 +810,10 @@ public class IndexerSQLMetadataStorageCoordinator implements IndexerMetadataStor
return null; return null;
} else { } else {
if (existingChunks //noinspection ConstantConditions
.stream() if (FluentIterable
.flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false)) .from(existingChunks)
.transformAndConcat(TimelineObjectHolder::getObject)
.anyMatch(chunk -> !chunk.getObject().getShardSpec().isCompatible(partialShardSpec.getShardSpecClass()))) { .anyMatch(chunk -> !chunk.getObject().getShardSpec().isCompatible(partialShardSpec.getShardSpecClass()))) {
// All existing segments should have a compatible shardSpec with partialShardSpec. // All existing segments should have a compatible shardSpec with partialShardSpec.
return null; return null;
@ -825,15 +825,19 @@ public class IndexerSQLMetadataStorageCoordinator implements IndexerMetadataStor
if (!existingChunks.isEmpty()) { if (!existingChunks.isEmpty()) {
TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks); TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
maxId = StreamSupport //noinspection ConstantConditions
.stream(existingHolder.getObject().spliterator(), false) for (DataSegment segment : FluentIterable
.from(existingHolder.getObject())
.transform(PartitionChunk::getObject)
// Here we check only the segments of the same shardSpec to find out the max partitionId. // Here we check only the segments of the same shardSpec to find out the max partitionId.
// Note that OverwriteShardSpec has the higher range for partitionId than others. // Note that OverwriteShardSpec has the higher range for partitionId than others.
// See PartitionIds. // See PartitionIds.
.filter(chunk -> chunk.getObject().getShardSpec().getClass() == partialShardSpec.getShardSpecClass()) .filter(segment -> segment.getShardSpec().getClass() == partialShardSpec.getShardSpecClass())) {
.max(Comparator.comparing(chunk -> chunk.getObject().getShardSpec().getPartitionNum())) // Don't use the stream API, for performance reasons.
.map(chunk -> SegmentIdWithShardSpec.fromDataSegment(chunk.getObject())) if (maxId == null || maxId.getShardSpec().getPartitionNum() < segment.getShardSpec().getPartitionNum()) {
.orElse(null); maxId = SegmentIdWithShardSpec.fromDataSegment(segment);
}
}
} }
final List<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle( final List<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle(
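The replaced pipeline allocated a Comparator and re-derived the shardSpec on every comparison; the loop keeps a running maximum directly, as the inline comment notes. A minimal sketch of the same reduction over a hypothetical Segment type:

import com.google.common.collect.ImmutableList;
import java.util.List;

public class RunningMaxSketch
{
  // Hypothetical stand-in for a segment carrying a partition number.
  static class Segment
  {
    final int partitionNum;
    Segment(int partitionNum) { this.partitionNum = partitionNum; }
  }

  // Equivalent of .max(Comparator.comparing(...)).orElse(null); returns null for an empty list.
  static Segment maxByPartitionNum(List<Segment> segments)
  {
    Segment max = null;
    for (Segment segment : segments) {
      if (max == null || max.partitionNum < segment.partitionNum) {
        max = segment;
      }
    }
    return max;
  }

  public static void main(String[] args)
  {
    List<Segment> segments = ImmutableList.of(new Segment(1), new Segment(3), new Segment(2));
    System.out.println(maxByPartitionNum(segments).partitionNum); // 3
  }
}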

View File

@ -22,10 +22,12 @@ package org.apache.druid.server.coordinator.duty;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap; import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec; import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
import org.apache.druid.indexer.partitions.PartitionsSpec; import org.apache.druid.indexer.partitions.PartitionsSpec;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.JodaUtils; import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.java.util.common.guava.Comparators; import org.apache.druid.java.util.common.guava.Comparators;
@ -54,7 +56,6 @@ import java.util.NoSuchElementException;
import java.util.Objects; import java.util.Objects;
import java.util.PriorityQueue; import java.util.PriorityQueue;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/** /**
* This class iterates all segments of the dataSources configured for compaction from the newest to the oldest. * This class iterates all segments of the dataSources configured for compaction from the newest to the oldest.
@ -119,13 +120,15 @@ public class NewestSegmentFirstIterator implements CompactionSegmentIterator
final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(interval); final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(interval);
resultMap.put( long size = 0;
entry.getDataSource(), for (DataSegment segment : FluentIterable
holders.stream() .from(holders)
.flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false)) .transformAndConcat(TimelineObjectHolder::getObject)
.mapToLong(chunk -> chunk.getObject().getSize()) .transform(PartitionChunk::getObject)) {
.sum() size += segment.getSize();
); }
resultMap.put(entry.getDataSource(), size);
} }
return resultMap; return resultMap;
} }
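PartitionHolder is only an Iterable, which is why the old code had to go through StreamSupport.stream(...spliterator(), false) before it could sum; iterating directly avoids that indirection. A sketch contrasting the two over a plain Iterable<Long>:

import com.google.common.collect.ImmutableList;
import java.util.stream.StreamSupport;

public class IterableSumSketch
{
  public static void main(String[] args)
  {
    Iterable<Long> sizes = ImmutableList.of(10L, 20L, 30L);

    // Streaming a bare Iterable requires wrapping its spliterator.
    long viaStream = StreamSupport.stream(sizes.spliterator(), false)
                                  .mapToLong(Long::longValue)
                                  .sum();

    // A plain loop over the same Iterable, as in the patched code.
    long viaLoop = 0;
    for (long size : sizes) {
      viaLoop += size;
    }

    System.out.println(viaStream + " " + viaLoop); // 60 60
  }
}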
@ -237,11 +240,9 @@ public class NewestSegmentFirstIterator implements CompactionSegmentIterator
if (holders.isEmpty()) { if (holders.isEmpty()) {
throw new NoSuchElementException(); throw new NoSuchElementException();
} }
return holders.remove(holders.size() - 1) return FluentIterable.from(holders.remove(holders.size() - 1).getObject())
.getObject() .transform(PartitionChunk::getObject)
.stream() .toList();
.map(PartitionChunk::getObject)
.collect(Collectors.toList());
} }
} }
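One behavioral wrinkle in this conversion: FluentIterable.toList() returns a Guava ImmutableList, whereas Collectors.toList() returns a mutable list, so any caller that mutated the returned segments would now fail fast. A quick sketch:

import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import java.util.List;

public class ToListSketch
{
  public static void main(String[] args)
  {
    List<Integer> doubled = FluentIterable.from(ImmutableList.of(1, 2, 3))
                                          .transform(i -> i * 2)
                                          .toList();
    System.out.println(doubled); // [2, 4, 6]
    // doubled.add(8); // would throw UnsupportedOperationException
  }
}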
@ -509,11 +510,16 @@ public class NewestSegmentFirstIterator implements CompactionSegmentIterator
private QueueEntry(List<DataSegment> segments) private QueueEntry(List<DataSegment> segments)
{ {
Preconditions.checkArgument(segments != null && !segments.isEmpty()); Preconditions.checkArgument(segments != null && !segments.isEmpty());
Collections.sort(segments); DateTime minStart = DateTimes.MAX, maxEnd = DateTimes.MIN;
this.interval = new Interval( for (DataSegment segment : segments) {
segments.get(0).getInterval().getStart(), if (segment.getInterval().getStart().compareTo(minStart) < 0) {
segments.get(segments.size() - 1).getInterval().getEnd() minStart = segment.getInterval().getStart();
); }
if (segment.getInterval().getEnd().compareTo(maxEnd) > 0) {
maxEnd = segment.getInterval().getEnd();
}
}
this.interval = new Interval(minStart, maxEnd);
this.segments = segments; this.segments = segments;
} }
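Beyond dropping the O(n log n) sort, the single pass addresses the correctness issue the commit message mentions ("fix a bug in QueueEntry"): after sorting, the last segment's end need not be the latest end, since an earlier-starting segment can extend further. A sketch of the single-pass umbrella-interval computation over Joda intervals:

import org.joda.time.DateTime;
import org.joda.time.Interval;
import java.util.Arrays;
import java.util.List;

public class UmbrellaIntervalSketch
{
  // Single pass: earliest start and latest end, independent of input order.
  static Interval umbrella(List<Interval> intervals)
  {
    DateTime minStart = intervals.get(0).getStart();
    DateTime maxEnd = intervals.get(0).getEnd();
    for (Interval interval : intervals) {
      if (interval.getStart().isBefore(minStart)) {
        minStart = interval.getStart();
      }
      if (interval.getEnd().isAfter(maxEnd)) {
        maxEnd = interval.getEnd();
      }
    }
    return new Interval(minStart, maxEnd);
  }

  public static void main(String[] args)
  {
    // Sorting by start and taking the last segment's end would yield 2020-01-03 here;
    // the true umbrella end is 2020-01-05.
    Interval wide = new Interval(DateTime.parse("2020-01-01T00:00:00Z"), DateTime.parse("2020-01-05T00:00:00Z"));
    Interval narrow = new Interval(DateTime.parse("2020-01-02T00:00:00Z"), DateTime.parse("2020-01-03T00:00:00Z"));
    System.out.println(umbrella(Arrays.asList(narrow, wide)));
  }
}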

View File

@ -30,6 +30,7 @@ import org.apache.druid.client.indexing.NoopIndexingServiceClient;
import org.apache.druid.indexer.TaskStatusPlus; import org.apache.druid.indexer.TaskStatusPlus;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec; import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.server.coordinator.CoordinatorCompactionConfig; import org.apache.druid.server.coordinator.CoordinatorCompactionConfig;
@ -44,6 +45,7 @@ import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.NumberedShardSpec; import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.apache.druid.timeline.partition.PartitionChunk; import org.apache.druid.timeline.partition.PartitionChunk;
import org.apache.druid.timeline.partition.ShardSpec; import org.apache.druid.timeline.partition.ShardSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval; import org.joda.time.Interval;
import org.joda.time.Period; import org.joda.time.Period;
import org.junit.Assert; import org.junit.Assert;
@ -74,11 +76,16 @@ public class CompactSegmentsTest
) )
{ {
Preconditions.checkArgument(segments.size() > 1); Preconditions.checkArgument(segments.size() > 1);
Collections.sort(segments); DateTime minStart = DateTimes.MAX, maxEnd = DateTimes.MIN;
Interval compactInterval = new Interval( for (DataSegment segment : segments) {
segments.get(0).getInterval().getStart(), if (segment.getInterval().getStart().compareTo(minStart) < 0) {
segments.get(segments.size() - 1).getInterval().getEnd() minStart = segment.getInterval().getStart();
); }
if (segment.getInterval().getEnd().compareTo(maxEnd) > 0) {
maxEnd = segment.getInterval().getEnd();
}
}
Interval compactInterval = new Interval(minStart, maxEnd);
final VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(segments.get(0).getDataSource()); final VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(segments.get(0).getDataSource());
segments.forEach( segments.forEach(
segment -> timeline.remove( segment -> timeline.remove(