Merge pull request #2301 from metamx/fix-2299_2

fix reference counting for segments
Fangjin Yang 2016-01-20 11:21:35 -08:00
commit 0c5f4b947c
5 changed files with 77 additions and 48 deletions
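
The problem being fixed, as the comments in the diff below describe it: a segment can be dropped while a query for it is still queued, so a query runner may end up reading a segment that has already been closed. The commit makes reference acquisition explicitly fallible — increment() can fail once the segment is closed — and has callers report the segment as missing instead of reading it. A minimal sketch of that contract, assuming (not copied from the Druid source) that increment() hands back a Closeable releaser or null once the resource is closed; all names here are illustrative:

import java.io.Closeable;
import java.io.IOException;

// Hedged sketch of the reference-counting contract this patch relies on; the
// class and method names are illustrative, not the Druid implementation.
class RefCountedResource implements Closeable
{
  private final Closeable delegate;   // the underlying resource (e.g. a mapped segment)
  private int references = 1;         // the "owner" reference, held until close()
  private boolean closed = false;

  RefCountedResource(Closeable delegate)
  {
    this.delegate = delegate;
  }

  // Take a reference. Returns a releaser, or null if the resource was already
  // closed -- exactly the case the new code paths below have to handle.
  // (A production implementation would also guard against double-release.)
  public synchronized Closeable increment()
  {
    if (closed) {
      return null;
    }
    references++;
    return new Closeable()
    {
      @Override
      public void close() throws IOException
      {
        release();
      }
    };
  }

  @Override
  public synchronized void close() throws IOException
  {
    if (!closed) {
      closed = true;
      release();                      // drop the owner reference
    }
  }

  private synchronized void release() throws IOException
  {
    if (--references == 0) {
      delegate.close();               // last reader is gone; really free the resource
    }
  }
}

With a contract like this, a query runner can treat a null releaser as "segment already gone" and fall back to reporting a missing segment, which is what ReferenceCountingSegmentQueryRunner does below.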

QueryRunnerHelper.java

@@ -22,6 +22,7 @@ package io.druid.query;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
+import com.metamx.common.guava.ResourceClosingSequence;
 import com.metamx.common.guava.Sequence;
 import com.metamx.common.guava.Sequences;
 import com.metamx.common.logger.Logger;
@@ -33,7 +34,9 @@ import io.druid.segment.Cursor;
 import io.druid.segment.StorageAdapter;
 import org.joda.time.Interval;

+import java.io.Closeable;
 import java.util.List;
+import java.util.Map;

 /**
  */
@@ -81,4 +84,15 @@ public class QueryRunnerHelper
         Predicates.<Result<T>>notNull()
     );
   }
+
+  public static <T> QueryRunner<T> makeClosingQueryRunner(final QueryRunner<T> runner, final Closeable closeable){
+    return new QueryRunner<T>()
+    {
+      @Override
+      public Sequence<T> run(Query<T> query, Map<String, Object> responseContext)
+      {
+        return new ResourceClosingSequence<>(runner.run(query, responseContext), closeable);
+      }
+    };
+  }
 }

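The new helper wraps the runner's results in a ResourceClosingSequence, so the Closeable (here, a segment-reference releaser) is closed only when the consumer is done with the results, not when the method that acquired the reference returns. A self-contained sketch of the same idea on a plain Iterator, since the metamx Sequence API is not reproduced in this diff; the class name is hypothetical:

import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Iterator;

// Hedged sketch: close a resource exactly when the consumer exhausts the
// results, mirroring what ResourceClosingSequence does for Sequences.
// (The real Sequence API also closes the resource if the consumer stops early.)
class ResourceClosingIterator<T> implements Iterator<T>
{
  private final Iterator<T> delegate;
  private final Closeable resource;
  private boolean closed = false;

  ResourceClosingIterator(Iterator<T> delegate, Closeable resource)
  {
    this.delegate = delegate;
    this.resource = resource;
  }

  @Override
  public boolean hasNext()
  {
    if (delegate.hasNext()) {
      return true;
    }
    closeOnce();       // consumer reached the end; release the resource
    return false;
  }

  @Override
  public T next()
  {
    return delegate.next();
  }

  private void closeOnce()
  {
    if (!closed) {
      closed = true;
      try {
        resource.close();
      }
      catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }
  }
}
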
ReferenceCountingSegmentQueryRunner.java

@@ -33,20 +33,24 @@ public class ReferenceCountingSegmentQueryRunner<T> implements QueryRunner<T>
 {
   private final QueryRunnerFactory<T, Query<T>> factory;
   private final ReferenceCountingSegment adapter;
+  private final SegmentDescriptor descriptor;

   public ReferenceCountingSegmentQueryRunner(
       QueryRunnerFactory<T, Query<T>> factory,
-      ReferenceCountingSegment adapter
+      ReferenceCountingSegment adapter,
+      SegmentDescriptor descriptor
   )
   {
     this.factory = factory;
     this.adapter = adapter;
+    this.descriptor = descriptor;
   }

   @Override
   public Sequence<T> run(final Query<T> query, Map<String, Object> responseContext)
   {
     final Closeable closeable = adapter.increment();
+    if (closeable != null) {
       try {
         final Sequence<T> baseSequence = factory.createRunner(adapter).run(query, responseContext);
@@ -56,5 +60,9 @@ public class ReferenceCountingSegmentQueryRunner<T> implements QueryRunner<T>
         CloseQuietly.close(closeable);
         throw e;
       }
+    } else {
+      // Segment was closed before we had a chance to increment the reference count
+      return new ReportTimelineMissingSegmentQueryRunner<T>(descriptor).run(query, responseContext);
+    }
   }
 }

FireHydrant.java

@@ -20,11 +20,13 @@
 package io.druid.segment.realtime;

 import com.google.common.base.Throwables;
+import com.metamx.common.Pair;
 import io.druid.segment.IncrementalIndexSegment;
 import io.druid.segment.ReferenceCountingSegment;
 import io.druid.segment.Segment;
 import io.druid.segment.incremental.IncrementalIndex;

+import java.io.Closeable;
 import java.io.IOException;

 /**
@@ -34,6 +36,7 @@ public class FireHydrant
   private final int count;
   private volatile IncrementalIndex index;
   private volatile ReferenceCountingSegment adapter;
+  private Object swapLock = new Object();

   public FireHydrant(
       IncrementalIndex index,
@@ -61,7 +64,7 @@ public class FireHydrant
     return index;
   }

-  public ReferenceCountingSegment getSegment()
+  public Segment getSegment()
   {
     return adapter;
   }
@@ -78,6 +81,7 @@ public class FireHydrant

   public void swapSegment(Segment adapter)
   {
+    synchronized (swapLock) {
       if (this.adapter != null) {
         try {
           this.adapter.close();
@@ -89,6 +93,16 @@ public class FireHydrant
       this.adapter = new ReferenceCountingSegment(adapter);
       this.index = null;
     }
+  }
+
+  public Pair<Segment, Closeable> getAndIncrementSegment()
+  {
+    // Prevent swapping of index before increment is called
+    synchronized (swapLock) {
+      Closeable closeable = adapter.increment();
+      return new Pair<Segment, Closeable>(adapter, closeable);
+    }
+  }

   @Override
   public String toString()

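FireHydrant's new swapLock closes a second race: without it, swapSegment() could close the current adapter in between a reader fetching the adapter and calling increment() on it, which is why the read-then-increment is now a single synchronized getAndIncrementSegment(). A hedged sketch of that pairing, reusing the hypothetical RefCountedResource from the sketch above:

import java.io.Closeable;
import java.io.IOException;

// Illustrative only: swap and get-and-increment must exclude each other so a
// reader can never increment a resource that a concurrent swap just closed.
class SwappableHolder
{
  private final Object swapLock = new Object();
  private volatile RefCountedResource current;

  SwappableHolder(RefCountedResource initial)
  {
    this.current = initial;
  }

  // Replace the resource and close the old one, as swapSegment does.
  public void swap(RefCountedResource replacement) throws IOException
  {
    synchronized (swapLock) {
      RefCountedResource old = current;
      current = replacement;
      if (old != null) {
        old.close();
      }
    }
  }

  // Read the current resource and take a reference to it in one atomic step,
  // as getAndIncrementSegment does. The result may still be null if the
  // resource was closed outright; callers handle that case.
  public Closeable getAndIncrement()
  {
    synchronized (swapLock) {
      return current.increment();
    }
  }
}
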
RealtimePlumber.java

@@ -36,6 +36,7 @@ import com.metamx.common.Granularity;
 import com.metamx.common.ISE;
 import com.metamx.common.Pair;
 import com.metamx.common.concurrent.ScheduledExecutors;
+import com.metamx.common.guava.CloseQuietly;
 import com.metamx.common.guava.FunctionalIterable;
 import com.metamx.emitter.EmittingLogger;
 import com.metamx.emitter.service.ServiceEmitter;
@@ -55,6 +56,7 @@ import io.druid.query.Query;
 import io.druid.query.QueryRunner;
 import io.druid.query.QueryRunnerFactory;
 import io.druid.query.QueryRunnerFactoryConglomerate;
+import io.druid.query.QueryRunnerHelper;
 import io.druid.query.QueryToolChest;
 import io.druid.query.ReportTimelineMissingSegmentQueryRunner;
 import io.druid.query.SegmentDescriptor;
@@ -327,47 +329,38 @@ public class RealtimePlumber implements Plumber
             @Override
             public QueryRunner<T> apply(FireHydrant input)
             {
-              // It is possible that we got a query for a segment, and while that query
-              // is in the jetty queue, the segment is abandoned. Here, we need to retry
-              // the query for the segment.
-              if (input == null || input.getSegment() == null) {
-                return new ReportTimelineMissingSegmentQueryRunner<T>(descriptor);
-              }
-
               if (skipIncrementalSegment && !input.hasSwapped()) {
                 return new NoopQueryRunner<T>();
               }

-              // Prevent the underlying segment from closing when its being iterated
-              final ReferenceCountingSegment segment = input.getSegment();
-              final Closeable closeable = segment.increment();
+              // Prevent the underlying segment from swapping when its being iterated
+              final Pair<Segment, Closeable> segment = input.getAndIncrementSegment();
               try {
+                QueryRunner<T> baseRunner = QueryRunnerHelper.makeClosingQueryRunner(
+                    factory.createRunner(segment.lhs),
+                    segment.rhs
+                );
+
                 if (input.hasSwapped() // only use caching if data is immutable
                     && cache.isLocal() // hydrants may not be in sync between replicas, make sure cache is local
                     ) {
                   return new CachingQueryRunner<>(
-                      makeHydrantIdentifier(input, segment),
+                      makeHydrantIdentifier(input, segment.lhs),
                       descriptor,
                       objectMapper,
                       cache,
                       toolchest,
-                      factory.createRunner(segment),
+                      baseRunner,
                       MoreExecutors.sameThreadExecutor(),
                       cacheConfig
                   );
                 } else {
-                  return factory.createRunner(input.getSegment());
+                  return baseRunner;
                 }
               }
-              finally {
-                try {
-                  if (closeable != null) {
-                    closeable.close();
-                  }
-                }
-                catch (IOException e) {
-                  throw Throwables.propagate(e);
-                }
+              catch (RuntimeException e) {
+                CloseQuietly.close(segment.rhs);
+                throw e;
               }
             }
           }
@@ -385,7 +378,7 @@ public class RealtimePlumber implements Plumber
     );
   }

-  protected static String makeHydrantIdentifier(FireHydrant input, ReferenceCountingSegment segment)
+  protected static String makeHydrantIdentifier(FireHydrant input, Segment segment)
   {
     return segment.getIdentifier() + "_" + input.getCount();
   }

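The plumber's rewritten apply() follows an acquire/hand-off/release-on-failure pattern: take a reference, give its releaser to makeClosingQueryRunner (which closes it once the results are consumed), and close it locally only if building that runner throws. A hedged, non-Druid sketch of the same ownership rule, building on the hypothetical classes from the sketches above:

import java.io.Closeable;
import java.io.IOException;

// Illustrative only: once the releaser has been handed to an object that will
// outlive this method, that object owns it; we release it locally only when
// the hand-off itself fails.
class HandOffExample
{
  static Closeable openResults(SwappableHolder holder) throws IOException
  {
    final Closeable releaser = holder.getAndIncrement();
    if (releaser == null) {
      // the resource is already gone; the plumber reports a missing segment here
      throw new IOException("resource already closed");
    }
    try {
      // Stand-in for factory.createRunner(...) + makeClosingQueryRunner(...):
      // building the consumer-facing object may fail, and if it does the
      // reference must not leak.
      return buildConsumerHandle(releaser);
    }
    catch (RuntimeException e) {
      try {
        releaser.close();            // hand-off failed; release the reference here
      }
      catch (IOException ignored) {
        // best effort, mirroring CloseQuietly.close
      }
      throw e;
    }
  }

  // Hypothetical: wraps the releaser in whatever the caller will iterate and close.
  private static Closeable buildConsumerHandle(final Closeable releaser)
  {
    return new Closeable()
    {
      @Override
      public void close() throws IOException
      {
        releaser.close();
      }
    };
  }
}
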
ServerManager.java

@@ -442,7 +442,7 @@ public class ServerManager implements QuerySegmentWalker
                     return toolChest.makeMetricBuilder(input);
                   }
                 },
-                new ReferenceCountingSegmentQueryRunner<T>(factory, adapter),
+                new ReferenceCountingSegmentQueryRunner<T>(factory, adapter, segmentDescriptor),
                 "query/segment/time",
                 ImmutableMap.of("segment", adapter.getIdentifier())
             ),