1) Cleanup TODO comments

cheddar 2013-04-23 16:25:29 -05:00
parent 545b489093
commit 7370b0f2fc
7 changed files with 19 additions and 17 deletions


@@ -98,7 +98,7 @@ public class RealtimeIndexTask extends AbstractTask
 @JsonProperty("id") String id,
 @JsonProperty("schema") Schema schema,
 @JsonProperty("firehose") FirehoseFactory firehoseFactory,
-@JsonProperty("fireDepartmentConfig") FireDepartmentConfig fireDepartmentConfig, // TODO rename?
+@JsonProperty("fireDepartmentConfig") FireDepartmentConfig fireDepartmentConfig,
 @JsonProperty("windowPeriod") Period windowPeriod,
 @JsonProperty("segmentGranularity") IndexGranularity segmentGranularity,
 @JsonProperty("minTime") DateTime minTime
@@ -178,8 +178,8 @@ public class RealtimeIndexTask extends AbstractTask
 firehose = new GracefulShutdownFirehose(wrappedFirehose, segmentGranularity, windowPeriod);
 }
-// TODO -- Take PlumberSchool in constructor (although that will need jackson injectables for stuff like
-// TODO -- the ServerView, which seems kind of odd?)
+// It would be nice to get the PlumberSchool in the constructor. Although that will need jackson injectables for
+// stuff like the ServerView, which seems kind of odd? Perhaps revisit this when Guice has been introduced.
 final RealtimePlumberSchool realtimePlumberSchool = new RealtimePlumberSchool(
 windowPeriod,
 new File(toolbox.getTaskWorkDir(), "persist"),
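For context on the "jackson injectables" the new comment refers to: Jackson 2 can hand runtime-only objects to @JsonCreator constructors via InjectableValues and @JacksonInject. The sketch below only illustrates that mechanism; TaskSpec and RuntimeDependency are made-up stand-ins (for something like the ServerView), not the actual Druid wiring.

import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;

public class InjectableValuesSketch
{
  // Stand-in for a runtime-only dependency, e.g. something like the ServerView.
  public static class RuntimeDependency
  {
  }

  public static class TaskSpec
  {
    final String id;
    final RuntimeDependency dependency;

    @JsonCreator
    public TaskSpec(
        @JsonProperty("id") String id,
        @JacksonInject RuntimeDependency dependency // supplied by the mapper, not by the JSON payload
    )
    {
      this.id = id;
      this.dependency = dependency;
    }
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();
    // Register the runtime object so Jackson can inject it while deserializing the spec.
    mapper.setInjectableValues(
        new InjectableValues.Std().addValue(RuntimeDependency.class, new RuntimeDependency())
    );
    final TaskSpec spec = mapper.readValue("{\"id\":\"realtime_example\"}", TaskSpec.class);
    System.out.println(spec.id + " -> dependency injected: " + (spec.dependency != null));
  }
}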
@@ -188,10 +188,9 @@ public class RealtimeIndexTask extends AbstractTask
 final SegmentPublisher segmentPublisher = new TaskActionSegmentPublisher(this, toolbox);
-// TODO -- We're adding stuff to talk to the coordinator in various places in the plumber, and may
-// TODO -- want to be more robust to coordinator downtime (currently we'll block/throw in whatever
-// TODO -- thread triggered the coordinator behavior, which will typically be either the main
-// TODO -- data processing loop or the persist thread)
+// NOTE: We talk to the coordinator in various places in the plumber and we could be more robust to issues
+// with the coordinator. Right now, we'll block/throw in whatever thread triggered the coordinator behavior,
+// which will typically be either the main data processing loop or the persist thread.
 // Wrap default SegmentAnnouncer such that we unlock intervals as we unannounce segments
 final SegmentAnnouncer lockingSegmentAnnouncer = new SegmentAnnouncer()
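One common way to act on the robustness concern in the NOTE above would be to wrap the coordinator calls in retry-with-backoff, so transient coordinator trouble does not immediately surface in the data processing or persist threads. The helper below is only a generic sketch of that idea; the name, retry count, and delays are arbitrary and this is not what the plumber currently does.

import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;

public class RetrySketch
{
  // Retries a call with exponential backoff plus jitter, rethrowing once maxTries is exhausted.
  public static <T> T callWithRetries(Callable<T> callable, int maxTries) throws Exception
  {
    long sleepMillis = 1000;
    for (int attempt = 1; ; attempt++) {
      try {
        return callable.call();
      }
      catch (Exception e) {
        if (attempt >= maxTries) {
          throw e;
        }
        Thread.sleep(sleepMillis + ThreadLocalRandom.current().nextLong(sleepMillis));
        sleepMillis = Math.min(sleepMillis * 2, 60000L);
      }
    }
  }
}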


@@ -56,7 +56,7 @@ public interface TaskStorage
 * Returns task as stored in the storage facility. If the task ID does not exist, this will return an
 * absentee Optional.
 *
-* TODO -- This method probably wants to be combined with {@link #getStatus}.
+* NOTE: This method really feels like it should be combined with {@link #getStatus}. Expect that in the future.
 */
 public Optional<Task> getTask(String taskid);


@@ -143,12 +143,13 @@ public class TaskStorageQueryAdapter
 * Returns all segments created by descendants for a particular task that stayed within the same task group. Includes
 * that task, plus any tasks it spawned, and so on. Does not include spawned tasks that ended up in a different task
 * group. Does not include this task's parents or siblings.
+*
+* This method is useful when you want to figure out all of the things a single task spawned. It does pose issues
+* with the result set perhaps growing boundlessly and we do not do anything to protect against that. Use at your
+* own risk and know that at some point, we might adjust this to actually enforce some sort of limits.
 */
 public Set<DataSegment> getSameGroupNewSegments(final String taskid)
 {
-// TODO: This is useful for regular index tasks (so we know what was published), but
-// TODO: for long-lived index tasks the list can get out of hand. We may want a limit.
 final Optional<Task> taskOptional = storage.getTask(taskid);
 final Set<DataSegment> segments = Sets.newHashSet();
 final List<Task> nextTasks = Lists.newArrayList();
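As a rough illustration of the kind of limit the new javadoc alludes to ("we might adjust this to actually enforce some sort of limits"), a descendant walk can simply refuse to accumulate past a cap. The sketch below is deliberately generic and hypothetical; it does not use the real TaskStorage or DataSegment APIs.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;

public class BoundedTraversalSketch
{
  // Walks a task's descendants breadth-first and accumulates their outputs, refusing to grow past maxResults.
  // The two Functions stand in for "child tasks of a task" and "segments published by a task".
  public static <ID, OUT> Set<OUT> collectWithLimit(
      ID rootId,
      Function<ID, List<ID>> childrenOf,
      Function<ID, List<OUT>> outputsOf,
      int maxResults
  )
  {
    final Set<OUT> results = new HashSet<>();
    final Deque<ID> frontier = new ArrayDeque<>();
    frontier.add(rootId);
    while (!frontier.isEmpty()) {
      final ID current = frontier.poll();
      for (OUT out : outputsOf.apply(current)) {
        results.add(out);
        if (results.size() > maxResults) {
          throw new IllegalStateException("Too many results under task " + rootId + " (limit " + maxResults + ")");
        }
      }
      frontier.addAll(childrenOf.apply(current));
    }
    return results;
  }
}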


@@ -297,8 +297,7 @@ public class IndexerCoordinatorNode extends QueryableNode<IndexerCoordinatorNode
 );
 staticContext.setBaseResource(resourceCollection);
-// TODO -- Need a QueryServlet and some kind of QuerySegmentWalker if we want to support querying tasks
-// TODO -- (e.g. for realtime) in local mode
+// If we want to support querying tasks (e.g. for realtime in local mode), we need a QueryServlet here.
 final Context root = new Context(server, "/", Context.SESSIONS);
 root.addServlet(new ServletHolder(new StatusServlet()), "/status");


@@ -177,7 +177,7 @@ public class IndexerCoordinatorResource
 }
 // Legacy endpoint
-// TODO Remove
+// TODO remove
 @Deprecated
 @GET
 @Path("/status/{taskid}")
@@ -239,7 +239,9 @@ public class IndexerCoordinatorResource
 {
 final Map<String, Object> retMap;
-// TODO make sure this worker is supposed to be running this task (attempt id? token?)
+// It would be great to verify that this worker is actually supposed to be running the task before
+// actually doing the task. Some ideas for how that could be done would be using some sort of attempt_id
+// or token that gets passed around.
 try {
 final T ret = taskActionClient.submit(holder.getAction());
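The attempt_id / token idea in the new comment could look roughly like this: the coordinator issues a token when it assigns a task, and action submissions have to echo the token back before they are honored. Everything below, class and method names included, is hypothetical.

import java.security.SecureRandom;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class TaskTokenSketch
{
  private final Map<String, String> tokensByTaskId = new ConcurrentHashMap<String, String>();
  private final SecureRandom random = new SecureRandom();

  // Called when a task is assigned to a worker; the token travels with the assignment.
  public String issueToken(String taskId)
  {
    final String token = Long.toHexString(random.nextLong());
    tokensByTaskId.put(taskId, token);
    return token;
  }

  // Called before submitting the worker's action; rejects submissions that do not present the expected token.
  public void checkToken(String taskId, String presentedToken)
  {
    final String expected = tokensByTaskId.get(taskId);
    if (expected == null || !expected.equals(presentedToken)) {
      throw new IllegalStateException("Worker is not authorized to act on task: " + taskId);
    }
  }
}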


@@ -372,7 +372,8 @@ public class InfoResource
 @QueryParam("interval") final String interval
 )
 {
-// TODO: will likely be all rewritten once Guice introduced
+// This is weird enough to have warranted some sort of T0D0 comment at one point, but it will likely be all
+// rewritten once Guice introduced, and that's the brunt of the information that was in the original T0D0 too.
 if (indexingServiceClient == null) {
 return Response.status(Response.Status.OK).entity(ImmutableMap.of("error", "no indexing service found")).build();
 }


@@ -96,7 +96,7 @@ public class TimeseriesQueryEngine
 @Override
 public void cleanup(Iterator<Result<TimeseriesResultValue>> toClean)
 {
-// TODO: Let's fix this to actually use Sequences for the closing of stuff
+// https://github.com/metamx/druid/issues/128
 while (toClean.hasNext()) {
 toClean.next();
 }
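The linked issue is about tying cleanup to the result sequence itself, so consumers do not have to drain an iterator just to release resources. The wrapper below illustrates that idea in plain Java; it is only a sketch and not the Sequence abstraction the old comment was asking for.

import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;

public class CloseableIteratorSketch
{
  // Pairs an iterator with the resource backing it, so callers can close the resource directly instead of
  // iterating to exhaustion purely to trigger cleanup.
  public static class CloseableIterator<T> implements Iterator<T>, Closeable
  {
    private final Iterator<T> delegate;
    private final Closeable resource;

    public CloseableIterator(Iterator<T> delegate, Closeable resource)
    {
      this.delegate = delegate;
      this.resource = resource;
    }

    @Override
    public boolean hasNext()
    {
      return delegate.hasNext();
    }

    @Override
    public T next()
    {
      return delegate.next();
    }

    @Override
    public void remove()
    {
      delegate.remove();
    }

    @Override
    public void close() throws IOException
    {
      resource.close();
    }
  }
}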