mirror of https://github.com/apache/druid.git
Merge pull request #433 from metamx/router-node
Client-side tier-aware query runner
commit 60c204afeb
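For orientation: the router node added in this change is driven by the new TierConfig (bound to the "druid.router" property prefix in CliRouter, below). The following is a rough, illustrative sketch only — the property names are inferred from the TierConfig fields in this diff and the values are invented, not taken from this commit:

    # illustrative values only; property names inferred from TierConfig in this commit
    druid.router.coordinatorServiceName=druid/coordinator
    druid.router.defaultBrokerServiceName=coldBroker
    druid.router.tierToBrokerMap={"hot":"hotBroker","_default_tier":"coldBroker"}
    druid.router.pollPeriod=PT1M

With a mapping like this, queries whose intervals match a load rule for the "hot" tier would be proxied to the hotBroker service, and everything else would fall through to the default broker.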
@@ -159,7 +159,7 @@ public class DatabaseRuleManager
 
     this.exec = Execs.scheduledSingleThreaded("DatabaseRuleManager-Exec--%d");
 
-    createDefaultRule(dbi, getRulesTable(), config.get().getDefaultTier(), jsonMapper);
+    createDefaultRule(dbi, getRulesTable(), config.get().getDefaultRule(), jsonMapper);
     ScheduledExecutors.scheduleWithFixedDelay(
         exec,
         new Duration(0),
@@ -274,8 +274,8 @@ public class DatabaseRuleManager
     if (theRules.get(dataSource) != null) {
       retVal.addAll(theRules.get(dataSource));
     }
-    if (theRules.get(config.get().getDefaultTier()) != null) {
-      retVal.addAll(theRules.get(config.get().getDefaultTier()));
+    if (theRules.get(config.get().getDefaultRule()) != null) {
+      retVal.addAll(theRules.get(config.get().getDefaultRule()));
     }
     return retVal;
   }
@@ -27,14 +27,14 @@ import org.joda.time.Period;
 public class DatabaseRuleManagerConfig
 {
   @JsonProperty
-  private String defaultTier = "_default";
+  private String defaultRule = "_default";
 
   @JsonProperty
   private Period pollDuration = new Period("PT1M");
 
-  public String getDefaultTier()
+  public String getDefaultRule()
   {
-    return defaultTier;
+    return defaultRule;
   }
 
   public Period getPollDuration()
@@ -64,7 +64,7 @@ public class DatabaseRuleManagerProvider implements Provider<DatabaseRuleManager>
   {
     dbConnector.createRulesTable();
     DatabaseRuleManager.createDefaultRule(
-        dbConnector.getDBI(), dbTables.get().getRulesTable(), config.get().getDefaultTier(), jsonMapper
+        dbConnector.getDBI(), dbTables.get().getRulesTable(), config.get().getDefaultRule(), jsonMapper
     );
   }
 
@@ -22,6 +22,7 @@ package io.druid.server.coordinator.rules;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import io.druid.timeline.DataSegment;
 import org.joda.time.DateTime;
+import org.joda.time.Interval;
 
 /**
  */
@@ -39,4 +40,10 @@ public class ForeverDropRule extends DropRule
   {
     return true;
   }
+
+  @Override
+  public boolean appliesTo(Interval interval, DateTime referenceTimestamp)
+  {
+    return true;
+  }
 }
@@ -23,6 +23,7 @@ import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import io.druid.timeline.DataSegment;
 import org.joda.time.DateTime;
+import org.joda.time.Interval;
 
 import java.util.Map;
 
@@ -66,4 +67,10 @@ public class ForeverLoadRule extends LoadRule
   {
     return true;
   }
+
+  @Override
+  public boolean appliesTo(Interval interval, DateTime referenceTimestamp)
+  {
+    return true;
+  }
 }
@@ -55,6 +55,12 @@ public class IntervalDropRule extends DropRule
   @Override
   public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
   {
-    return interval.contains(segment.getInterval());
+    return appliesTo(segment.getInterval(), referenceTimestamp);
+  }
+
+  @Override
+  public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
+  {
+    return interval.contains(theInterval);
   }
 }
@@ -85,7 +85,13 @@ public class IntervalLoadRule extends LoadRule
   @Override
   public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
   {
-    return interval.contains(segment.getInterval());
+    return appliesTo(segment.getInterval(), referenceTimestamp);
+  }
+
+  @Override
+  public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
+  {
+    return interval.contains(theInterval);
   }
 
   @Override
@@ -55,8 +55,14 @@ public class PeriodDropRule extends DropRule
 
   @Override
   public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
+  {
+    return appliesTo(segment.getInterval(), referenceTimestamp);
+  }
+
+  @Override
+  public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
   {
     final Interval currInterval = new Interval(period, referenceTimestamp);
-    return currInterval.contains(segment.getInterval());
+    return currInterval.contains(theInterval);
   }
 }
@@ -86,8 +86,14 @@ public class PeriodLoadRule extends LoadRule
 
   @Override
   public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
+  {
+    return appliesTo(segment.getInterval(), referenceTimestamp);
+  }
+
+  @Override
+  public boolean appliesTo(Interval interval, DateTime referenceTimestamp)
   {
     final Interval currInterval = new Interval(period, referenceTimestamp);
-    return currInterval.overlaps(segment.getInterval());
+    return currInterval.overlaps(interval);
   }
 }
@@ -26,6 +26,7 @@ import io.druid.server.coordinator.DruidCoordinator;
 import io.druid.server.coordinator.DruidCoordinatorRuntimeParams;
 import io.druid.timeline.DataSegment;
 import org.joda.time.DateTime;
+import org.joda.time.Interval;
 
 /**
  */
@@ -37,13 +38,15 @@ import org.joda.time.DateTime;
     @JsonSubTypes.Type(name = "dropByPeriod", value = PeriodDropRule.class),
     @JsonSubTypes.Type(name = "dropByInterval", value = IntervalDropRule.class),
     @JsonSubTypes.Type(name = "dropForever", value = ForeverDropRule.class)
 })
 public interface Rule
 {
   public String getType();
 
   public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp);
 
+  public boolean appliesTo(Interval interval, DateTime referenceTimestamp);
+
   public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment);
 }
@@ -0,0 +1,178 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.google.common.base.Throwables;
+import com.google.inject.Inject;
+import com.metamx.common.Pair;
+import com.metamx.common.concurrent.ScheduledExecutors;
+import com.metamx.common.lifecycle.LifecycleStart;
+import com.metamx.common.lifecycle.LifecycleStop;
+import com.metamx.emitter.EmittingLogger;
+import io.druid.concurrent.Execs;
+import io.druid.curator.discovery.ServerDiscoveryFactory;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.query.Query;
+import io.druid.server.coordinator.rules.LoadRule;
+import io.druid.server.coordinator.rules.Rule;
+import org.joda.time.DateTime;
+import org.joda.time.Duration;
+import org.joda.time.Interval;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ */
+public class BrokerSelector<T>
+{
+  private static EmittingLogger log = new EmittingLogger(BrokerSelector.class);
+
+  private final CoordinatorRuleManager ruleManager;
+  private final TierConfig tierConfig;
+  private final ServerDiscoveryFactory serverDiscoveryFactory;
+  private final ConcurrentHashMap<String, ServerDiscoverySelector> selectorMap = new ConcurrentHashMap<String, ServerDiscoverySelector>();
+
+  private final Object lock = new Object();
+
+  private volatile boolean started = false;
+
+  @Inject
+  public BrokerSelector(
+      CoordinatorRuleManager ruleManager,
+      TierConfig tierConfig,
+      ServerDiscoveryFactory serverDiscoveryFactory
+  )
+  {
+    this.ruleManager = ruleManager;
+    this.tierConfig = tierConfig;
+    this.serverDiscoveryFactory = serverDiscoveryFactory;
+  }
+
+  @LifecycleStart
+  public void start()
+  {
+    synchronized (lock) {
+      if (started) {
+        return;
+      }
+
+      try {
+        for (Map.Entry<String, String> entry : tierConfig.getTierToBrokerMap().entrySet()) {
+          ServerDiscoverySelector selector = serverDiscoveryFactory.createSelector(entry.getValue());
+          selector.start();
+          selectorMap.put(entry.getValue(), selector);
+        }
+      }
+      catch (Exception e) {
+        throw Throwables.propagate(e);
+      }
+
+      started = true;
+    }
+  }
+
+  @LifecycleStop
+  public void stop()
+  {
+    synchronized (lock) {
+      if (!started) {
+        return;
+      }
+
+      try {
+        for (ServerDiscoverySelector selector : selectorMap.values()) {
+          selector.stop();
+        }
+      }
+      catch (Exception e) {
+        throw Throwables.propagate(e);
+      }
+
+      started = false;
+    }
+  }
+
+  public Pair<String, ServerDiscoverySelector> select(final Query<T> query)
+  {
+    synchronized (lock) {
+      if (!ruleManager.isStarted() || !started) {
+        return null;
+      }
+    }
+
+    List<Rule> rules = ruleManager.getRulesWithDefault((query.getDataSource()).getName());
+
+    // find the rule that can apply to the entire set of intervals
+    DateTime now = new DateTime();
+    int lastRulePosition = -1;
+    LoadRule baseRule = null;
+
+    for (Interval interval : query.getIntervals()) {
+      int currRulePosition = 0;
+      for (Rule rule : rules) {
+        if (rule instanceof LoadRule && currRulePosition > lastRulePosition && rule.appliesTo(interval, now)) {
+          lastRulePosition = currRulePosition;
+          baseRule = (LoadRule) rule;
+          break;
+        }
+        currRulePosition++;
+      }
+    }
+
+    if (baseRule == null) {
+      return null;
+    }
+
+    // in the baseRule, find the broker of highest priority
+    String brokerServiceName = null;
+    for (Map.Entry<String, String> entry : tierConfig.getTierToBrokerMap().entrySet()) {
+      if (baseRule.getTieredReplicants().containsKey(entry.getKey())) {
+        brokerServiceName = entry.getValue();
+        break;
+      }
+    }
+
+    if (brokerServiceName == null) {
+      log.makeAlert(
+          "WTF?! No brokerServiceName found for datasource[%s], intervals[%s]. Using default[%s].",
+          query.getDataSource(),
+          query.getIntervals(),
+          tierConfig.getDefaultBrokerServiceName()
+      ).emit();
+      brokerServiceName = tierConfig.getDefaultBrokerServiceName();
+    }
+
+    ServerDiscoverySelector retVal = selectorMap.get(brokerServiceName);
+
+    if (retVal == null) {
+      log.makeAlert(
+          "WTF?! No selector found for brokerServiceName[%s]. Using default selector for[%s]",
+          brokerServiceName,
+          tierConfig.getDefaultBrokerServiceName()
+      ).emit();
+      retVal = selectorMap.get(tierConfig.getDefaultBrokerServiceName());
+    }
+
+    return new Pair<>(brokerServiceName, retVal);
+  }
+}
@@ -0,0 +1,193 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.api.client.util.Charsets;
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
+import com.google.inject.Inject;
+import com.metamx.common.concurrent.ScheduledExecutors;
+import com.metamx.common.lifecycle.LifecycleStart;
+import com.metamx.common.lifecycle.LifecycleStop;
+import com.metamx.common.logger.Logger;
+import com.metamx.http.client.HttpClient;
+import com.metamx.http.client.response.StatusResponseHandler;
+import com.metamx.http.client.response.StatusResponseHolder;
+import io.druid.client.selector.Server;
+import io.druid.concurrent.Execs;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.guice.ManageLifecycle;
+import io.druid.guice.annotations.Global;
+import io.druid.guice.annotations.Json;
+import io.druid.server.coordinator.rules.Rule;
+import org.joda.time.Duration;
+
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ */
+@ManageLifecycle
+public class CoordinatorRuleManager
+{
+  private static final Logger log = new Logger(CoordinatorRuleManager.class);
+
+  private final HttpClient httpClient;
+  private final ObjectMapper jsonMapper;
+  private final Supplier<TierConfig> config;
+  private final ServerDiscoverySelector selector;
+
+  private final StatusResponseHandler responseHandler;
+  private final AtomicReference<ConcurrentHashMap<String, List<Rule>>> rules;
+
+  private volatile ScheduledExecutorService exec;
+
+  private final Object lock = new Object();
+
+  private volatile boolean started = false;
+
+  @Inject
+  public CoordinatorRuleManager(
+      @Global HttpClient httpClient,
+      @Json ObjectMapper jsonMapper,
+      Supplier<TierConfig> config,
+      ServerDiscoverySelector selector
+  )
+  {
+    this.httpClient = httpClient;
+    this.jsonMapper = jsonMapper;
+    this.config = config;
+    this.selector = selector;
+
+    this.responseHandler = new StatusResponseHandler(Charsets.UTF_8);
+    this.rules = new AtomicReference<>(
+        new ConcurrentHashMap<String, List<Rule>>()
+    );
+  }
+
+  @LifecycleStart
+  public void start()
+  {
+    synchronized (lock) {
+      if (started) {
+        return;
+      }
+
+      this.exec = Execs.scheduledSingleThreaded("CoordinatorRuleManager-Exec--%d");
+
+      ScheduledExecutors.scheduleWithFixedDelay(
+          exec,
+          new Duration(0),
+          config.get().getPollPeriod().toStandardDuration(),
+          new Runnable()
+          {
+            @Override
+            public void run()
+            {
+              poll();
+            }
+          }
+      );
+
+      started = true;
+    }
+  }
+
+  @LifecycleStop
+  public void stop()
+  {
+    synchronized (lock) {
+      if (!started) {
+        return;
+      }
+
+      rules.set(new ConcurrentHashMap<String, List<Rule>>());
+
+      started = false;
+      exec.shutdownNow();
+      exec = null;
+    }
+  }
+
+  public boolean isStarted()
+  {
+    return started;
+  }
+
+  public void poll()
+  {
+    try {
+      String url = getRuleURL();
+      if (url == null) {
+        return;
+      }
+
+      StatusResponseHolder response = httpClient.get(new URL(url))
+                                                .go(responseHandler)
+                                                .get();
+
+      ConcurrentHashMap<String, List<Rule>> newRules = new ConcurrentHashMap<String, List<Rule>>(
+          (Map<String, List<Rule>>) jsonMapper.readValue(
+              response.getContent(), new TypeReference<Map<String, List<Rule>>>()
+              {
+              }
+          )
+      );
+
+      log.info("Got [%,d] rules", newRules.keySet().size());
+
+      rules.set(newRules);
+    }
+    catch (Exception e) {
+      log.error(e, "Exception while polling for rules");
+    }
+  }
+
+  public List<Rule> getRulesWithDefault(final String dataSource)
+  {
+    List<Rule> retVal = Lists.newArrayList();
+    Map<String, List<Rule>> theRules = rules.get();
+    if (theRules.get(dataSource) != null) {
+      retVal.addAll(theRules.get(dataSource));
+    }
+    if (theRules.get(config.get().getDefaultRule()) != null) {
+      retVal.addAll(theRules.get(config.get().getDefaultRule()));
+    }
+    return retVal;
+  }
+
+  private String getRuleURL()
+  {
+    Server server = selector.pick();
+
+    if (server == null) {
+      log.error("No instances found for [%s]!", config.get().getCoordinatorServiceName());
+      return null;
+    }
+
+    return String.format("http://%s%s", server.getHost(), config.get().getRulesEndpoint());
+  }
+}
@@ -0,0 +1,82 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.inject.Inject;
+import com.metamx.http.client.HttpClient;
+import io.druid.curator.discovery.ServerDiscoveryFactory;
+import io.druid.guice.annotations.Global;
+import io.druid.query.Query;
+import io.druid.query.QueryRunner;
+import io.druid.query.QuerySegmentWalker;
+import io.druid.query.QueryToolChestWarehouse;
+import io.druid.query.SegmentDescriptor;
+import org.joda.time.Interval;
+
+/**
+ */
+public class RouterQuerySegmentWalker implements QuerySegmentWalker
+{
+  private final QueryToolChestWarehouse warehouse;
+  private final ObjectMapper objectMapper;
+  private final HttpClient httpClient;
+  private final BrokerSelector brokerSelector;
+  private final TierConfig tierConfig;
+
+  @Inject
+  public RouterQuerySegmentWalker(
+      QueryToolChestWarehouse warehouse,
+      ObjectMapper objectMapper,
+      @Global HttpClient httpClient,
+      BrokerSelector brokerSelector,
+      TierConfig tierConfig
+  )
+  {
+    this.warehouse = warehouse;
+    this.objectMapper = objectMapper;
+    this.httpClient = httpClient;
+    this.brokerSelector = brokerSelector;
+    this.tierConfig = tierConfig;
+  }
+
+  @Override
+  public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals)
+  {
+    return makeRunner();
+  }
+
+  @Override
+  public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs)
+  {
+    return makeRunner();
+  }
+
+  private <T> QueryRunner<T> makeRunner()
+  {
+    return new TierAwareQueryRunner<T>(
+        warehouse,
+        objectMapper,
+        httpClient,
+        brokerSelector,
+        tierConfig
+    );
+  }
+}
@@ -0,0 +1,121 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Throwables;
+import com.metamx.common.Pair;
+import com.metamx.common.guava.Sequence;
+import com.metamx.common.guava.Sequences;
+import com.metamx.emitter.EmittingLogger;
+import com.metamx.http.client.HttpClient;
+import io.druid.client.DirectDruidClient;
+import io.druid.client.selector.Server;
+import io.druid.curator.discovery.ServerDiscoveryFactory;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.query.Query;
+import io.druid.query.QueryRunner;
+import io.druid.query.QueryToolChestWarehouse;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ */
+public class TierAwareQueryRunner<T> implements QueryRunner<T>
+{
+  private static EmittingLogger log = new EmittingLogger(TierAwareQueryRunner.class);
+
+  private final QueryToolChestWarehouse warehouse;
+  private final ObjectMapper objectMapper;
+  private final HttpClient httpClient;
+  private final BrokerSelector<T> brokerSelector;
+  private final TierConfig tierConfig;
+
+  private final ConcurrentHashMap<String, Server> serverBackup = new ConcurrentHashMap<String, Server>();
+
+  public TierAwareQueryRunner(
+      QueryToolChestWarehouse warehouse,
+      ObjectMapper objectMapper,
+      HttpClient httpClient,
+      BrokerSelector<T> brokerSelector,
+      TierConfig tierConfig
+  )
+  {
+    this.warehouse = warehouse;
+    this.objectMapper = objectMapper;
+    this.httpClient = httpClient;
+    this.brokerSelector = brokerSelector;
+    this.tierConfig = tierConfig;
+  }
+
+  public Server findServer(Query<T> query)
+  {
+    final Pair<String, ServerDiscoverySelector> selected = brokerSelector.select(query);
+    final String brokerServiceName = selected.lhs;
+    final ServerDiscoverySelector selector = selected.rhs;
+
+    Server server = selector.pick();
+    if (server == null) {
+      log.error(
+          "WTF?! No server found for brokerServiceName[%s]. Using backup",
+          brokerServiceName
+      );
+
+      server = serverBackup.get(brokerServiceName);
+
+      if (server == null) {
+        log.makeAlert(
+            "WTF?! No backup found for brokerServiceName[%s]. Using default[%s]",
+            brokerServiceName,
+            tierConfig.getDefaultBrokerServiceName()
+        ).emit();
+
+        server = serverBackup.get(tierConfig.getDefaultBrokerServiceName());
+      }
+    } else {
+      serverBackup.put(brokerServiceName, server);
+    }
+
+    return server;
+  }
+
+  @Override
+  public Sequence<T> run(Query<T> query)
+  {
+    Server server = findServer(query);
+
+    if (server == null) {
+      log.makeAlert(
+          "Catastrophic failure! No servers found at all! Failing request!"
+      ).emit();
+      return Sequences.empty();
+    }
+
+    QueryRunner<T> client = new DirectDruidClient<T>(
+        warehouse,
+        objectMapper,
+        httpClient,
+        server.getHost()
+    );
+
+    return client.run(query);
+  }
+}
@@ -0,0 +1,91 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableMap;
+import io.druid.client.DruidServer;
+import org.joda.time.Period;
+
+import javax.validation.constraints.NotNull;
+import java.util.LinkedHashMap;
+
+/**
+ */
+public class TierConfig
+{
+  @JsonProperty
+  @NotNull
+  private String defaultBrokerServiceName = "";
+
+  @JsonProperty
+  private LinkedHashMap<String, String> tierToBrokerMap;
+
+  @JsonProperty
+  @NotNull
+  private String defaultRule = "_default";
+
+  @JsonProperty
+  @NotNull
+  private String rulesEndpoint = "/druid/coordinator/v1/rules";
+
+  @JsonProperty
+  @NotNull
+  private String coordinatorServiceName = null;
+
+  @JsonProperty
+  @NotNull
+  private Period pollPeriod = new Period("PT1M");
+
+  // tier, <bard, numThreads>
+  public LinkedHashMap<String, String> getTierToBrokerMap()
+  {
+    return tierToBrokerMap == null ? new LinkedHashMap<>(
+        ImmutableMap.of(
+            DruidServer.DEFAULT_TIER, defaultBrokerServiceName
+        )
+    ) : tierToBrokerMap;
+  }
+
+  public String getDefaultBrokerServiceName()
+  {
+    return defaultBrokerServiceName;
+  }
+
+  public String getDefaultRule()
+  {
+    return defaultRule;
+  }
+
+  public String getRulesEndpoint()
+  {
+    return rulesEndpoint;
+  }
+
+  public String getCoordinatorServiceName()
+  {
+    return coordinatorServiceName;
+  }
+
+  public Period getPollPeriod()
+  {
+    return pollPeriod;
+  }
+}
@@ -122,6 +122,12 @@ public class LoadRuleTest
       {
         return true;
       }
+
+      @Override
+      public boolean appliesTo(Interval interval, DateTime referenceTimestamp)
+      {
+        return true;
+      }
     };
 
     DruidCluster druidCluster = new DruidCluster(
@@ -214,6 +220,12 @@ public class LoadRuleTest
       {
        return true;
       }
+
+      @Override
+      public boolean appliesTo(Interval interval, DateTime referenceTimestamp)
+      {
+        return true;
+      }
     };
 
     DruidServer server1 = new DruidServer(
@@ -0,0 +1,230 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableMap;
+import com.metamx.common.Pair;
+import com.metamx.http.client.HttpClient;
+import io.druid.client.DruidServer;
+import io.druid.curator.discovery.ServerDiscoveryFactory;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.guice.annotations.Global;
+import io.druid.guice.annotations.Json;
+import io.druid.query.Druids;
+import io.druid.query.TableDataSource;
+import io.druid.query.aggregation.AggregatorFactory;
+import io.druid.query.aggregation.CountAggregatorFactory;
+import io.druid.query.spec.MultipleIntervalSegmentSpec;
+import io.druid.query.timeboundary.TimeBoundaryQuery;
+import io.druid.server.coordinator.rules.IntervalLoadRule;
+import io.druid.server.coordinator.rules.Rule;
+import junit.framework.Assert;
+import org.easymock.EasyMock;
+import org.joda.time.Interval;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.List;
+
+/**
+ */
+public class BrokerSelectorTest
+{
+  private ServerDiscoveryFactory factory;
+  private ServerDiscoverySelector selector;
+  private BrokerSelector brokerSelector;
+
+  @Before
+  public void setUp() throws Exception
+  {
+    factory = EasyMock.createMock(ServerDiscoveryFactory.class);
+    selector = EasyMock.createMock(ServerDiscoverySelector.class);
+
+    brokerSelector = new BrokerSelector(
+        new TestRuleManager(null, null, null, null),
+        new TierConfig()
+        {
+          @Override
+          public LinkedHashMap<String, String> getTierToBrokerMap()
+          {
+            return new LinkedHashMap<String, String>(
+                ImmutableMap.<String, String>of(
+                    "hot", "hotBroker",
+                    "medium", "mediumBroker",
+                    DruidServer.DEFAULT_TIER, "coldBroker"
+                )
+            );
+          }
+
+          @Override
+          public String getDefaultBrokerServiceName()
+          {
+            return "hotBroker";
+          }
+        },
+        factory
+    );
+    EasyMock.expect(factory.createSelector(EasyMock.<String>anyObject())).andReturn(selector).atLeastOnce();
+    EasyMock.replay(factory);
+
+    selector.start();
+    EasyMock.expectLastCall().atLeastOnce();
+    selector.stop();
+    EasyMock.expectLastCall().atLeastOnce();
+    EasyMock.replay(selector);
+
+    brokerSelector.start();
+  }
+
+  @After
+  public void tearDown() throws Exception
+  {
+    brokerSelector.stop();
+
+    EasyMock.verify(selector);
+    EasyMock.verify(factory);
+  }
+
+  @Test
+  public void testBasicSelect() throws Exception
+  {
+    String brokerName = (String) brokerSelector.select(
+        new TimeBoundaryQuery(
+            new TableDataSource("test"),
+            new MultipleIntervalSegmentSpec(Arrays.<Interval>asList(new Interval("2011-08-31/2011-09-01"))),
+            null
+        )
+    ).lhs;
+
+    Assert.assertEquals("coldBroker", brokerName);
+  }
+
+  @Test
+  public void testBasicSelect2() throws Exception
+  {
+    String brokerName = (String) brokerSelector.select(
+        new TimeBoundaryQuery(
+            new TableDataSource("test"),
+            new MultipleIntervalSegmentSpec(Arrays.<Interval>asList(new Interval("2013-08-31/2013-09-01"))),
+            null
+        )
+    ).lhs;
+
+    Assert.assertEquals("hotBroker", brokerName);
+  }
+
+  @Test
+  public void testSelectMatchesNothing() throws Exception
+  {
+    Pair retVal = brokerSelector.select(
+        new TimeBoundaryQuery(
+            new TableDataSource("test"),
+            new MultipleIntervalSegmentSpec(Arrays.<Interval>asList(new Interval("2010-08-31/2010-09-01"))),
+            null
+        )
+    );
+
+    Assert.assertEquals(null, retVal);
+  }
+
+  @Test
+  public void testSelectMultiInterval() throws Exception
+  {
+    String brokerName = (String) brokerSelector.select(
+        Druids.newTimeseriesQueryBuilder()
+              .dataSource("test")
+              .aggregators(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("count")))
+              .intervals(
+                  new MultipleIntervalSegmentSpec(
+                      Arrays.<Interval>asList(
+                          new Interval("2013-08-31/2013-09-01"),
+                          new Interval("2012-08-31/2012-09-01"),
+                          new Interval("2011-08-31/2011-09-01")
+                      )
+                  )
+              ).build()
+    ).lhs;
+
+    Assert.assertEquals("coldBroker", brokerName);
+  }
+
+  @Test
+  public void testSelectMultiInterval2() throws Exception
+  {
+    String brokerName = (String) brokerSelector.select(
+        Druids.newTimeseriesQueryBuilder()
+              .dataSource("test")
+              .aggregators(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("count")))
+              .intervals(
+                  new MultipleIntervalSegmentSpec(
+                      Arrays.<Interval>asList(
+                          new Interval("2011-08-31/2011-09-01"),
+                          new Interval("2012-08-31/2012-09-01"),
+                          new Interval("2013-08-31/2013-09-01")
+                      )
+                  )
+              ).build()
+    ).lhs;
+
+    Assert.assertEquals("coldBroker", brokerName);
+  }
+
+  private static class TestRuleManager extends CoordinatorRuleManager
+  {
+    public TestRuleManager(
+        @Global HttpClient httpClient,
+        @Json ObjectMapper jsonMapper,
+        Supplier<TierConfig> config,
+        ServerDiscoverySelector selector
+    )
+    {
+      super(httpClient, jsonMapper, config, selector);
+    }
+
+    @Override
+    public boolean isStarted()
+    {
+      return true;
+    }
+
+    @Override
+    public List<Rule> getRulesWithDefault(String dataSource)
+    {
+      return Arrays.<Rule>asList(
+          new IntervalLoadRule(new Interval("2013/2014"), ImmutableMap.<String, Integer>of("hot", 1), null, null),
+          new IntervalLoadRule(new Interval("2012/2013"), ImmutableMap.<String, Integer>of("medium", 1), null, null),
+          new IntervalLoadRule(
+              new Interval("2011/2012"),
+              ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, 1),
+              null,
+              null
+          )
+      );
+    }
+  }
+}
@@ -0,0 +1,139 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.server.router;
+
+import com.google.common.collect.ImmutableMap;
+import com.metamx.common.Pair;
+import io.druid.client.DruidServer;
+import io.druid.client.selector.Server;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.query.Query;
+import io.druid.query.TableDataSource;
+import io.druid.query.spec.MultipleIntervalSegmentSpec;
+import io.druid.query.timeboundary.TimeBoundaryQuery;
+import junit.framework.Assert;
+import org.easymock.EasyMock;
+import org.joda.time.Interval;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+
+/**
+ */
+public class TierAwareQueryRunnerTest
+{
+  private ServerDiscoverySelector selector;
+  private BrokerSelector brokerSelector;
+  private TierConfig config;
+  private Server server;
+
+  @Before
+  public void setUp() throws Exception
+  {
+    selector = EasyMock.createMock(ServerDiscoverySelector.class);
+    brokerSelector = EasyMock.createMock(BrokerSelector.class);
+
+    config = new TierConfig()
+    {
+      @Override
+      public LinkedHashMap<String, String> getTierToBrokerMap()
+      {
+        return new LinkedHashMap<>(
+            ImmutableMap.<String, String>of(
+                "hot", "hotBroker",
+                "medium", "mediumBroker",
+                DruidServer.DEFAULT_TIER, "coldBroker"
+            )
+        );
+      }
+
+      @Override
+      public String getDefaultBrokerServiceName()
+      {
+        return "hotBroker";
+      }
+    };
+
+    server = new Server()
+    {
+      @Override
+      public String getScheme()
+      {
+        return null;
+      }
+
+      @Override
+      public String getHost()
+      {
+        return "foo";
+      }
+
+      @Override
+      public String getAddress()
+      {
+        return null;
+      }
+
+      @Override
+      public int getPort()
+      {
+        return 0;
+      }
+    };
+  }
+
+  @After
+  public void tearDown() throws Exception
+  {
+    EasyMock.verify(brokerSelector);
+    EasyMock.verify(selector);
+  }
+
+  @Test
+  public void testFindServer() throws Exception
+  {
+    EasyMock.expect(brokerSelector.select(EasyMock.<Query>anyObject())).andReturn(new Pair("hotBroker", selector));
+    EasyMock.replay(brokerSelector);
+
+    EasyMock.expect(selector.pick()).andReturn(server).once();
+    EasyMock.replay(selector);
+
+    TierAwareQueryRunner queryRunner = new TierAwareQueryRunner(
+        null,
+        null,
+        null,
+        brokerSelector,
+        config
+    );
+
+    Server server = queryRunner.findServer(
+        new TimeBoundaryQuery(
+            new TableDataSource("test"),
+            new MultipleIntervalSegmentSpec(Arrays.<Interval>asList(new Interval("2011-08-31/2011-09-01"))),
+            null
+        )
+    );
+
+    Assert.assertEquals("foo", server.getHost());
+  }
+}
@@ -0,0 +1,105 @@
+/*
+ * Druid - a distributed column store.
+ * Copyright (C) 2012, 2013 Metamarkets Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package io.druid.cli;
+
+import com.google.common.collect.ImmutableList;
+import com.google.inject.Binder;
+import com.google.inject.Module;
+import com.google.inject.Provides;
+import com.metamx.common.logger.Logger;
+import io.airlift.command.Command;
+import io.druid.curator.discovery.DiscoveryModule;
+import io.druid.curator.discovery.ServerDiscoveryFactory;
+import io.druid.curator.discovery.ServerDiscoverySelector;
+import io.druid.guice.Jerseys;
+import io.druid.guice.JsonConfigProvider;
+import io.druid.guice.LazySingleton;
+import io.druid.guice.LifecycleModule;
+import io.druid.guice.ManageLifecycle;
+import io.druid.guice.annotations.Self;
+import io.druid.query.MapQueryToolChestWarehouse;
+import io.druid.query.QuerySegmentWalker;
+import io.druid.query.QueryToolChestWarehouse;
+import io.druid.server.QueryResource;
+import io.druid.server.initialization.JettyServerInitializer;
+import io.druid.server.router.BrokerSelector;
+import io.druid.server.router.CoordinatorRuleManager;
+import io.druid.server.router.RouterQuerySegmentWalker;
+import io.druid.server.router.TierConfig;
+import org.eclipse.jetty.server.Server;
+
+import java.util.List;
+
+/**
+ */
+@Command(
+    name = "router",
+    description = "Experimental! Understands tiers and routes things to different brokers"
+)
+public class CliRouter extends ServerRunnable
+{
+  private static final Logger log = new Logger(CliRouter.class);
+
+  public CliRouter()
+  {
+    super(log);
+  }
+
+  @Override
+  protected List<Object> getModules()
+  {
+    return ImmutableList.<Object>of(
+        new Module()
+        {
+          @Override
+          public void configure(Binder binder)
+          {
+            JsonConfigProvider.bind(binder, "druid.router", TierConfig.class);
+
+            binder.bind(CoordinatorRuleManager.class);
+            LifecycleModule.register(binder, CoordinatorRuleManager.class);
+
+            binder.bind(QueryToolChestWarehouse.class).to(MapQueryToolChestWarehouse.class);
+
+            binder.bind(BrokerSelector.class).in(ManageLifecycle.class);
+            binder.bind(QuerySegmentWalker.class).to(RouterQuerySegmentWalker.class).in(LazySingleton.class);
+
+            binder.bind(JettyServerInitializer.class).to(QueryJettyServerInitializer.class).in(LazySingleton.class);
+            Jerseys.addResource(binder, QueryResource.class);
+            LifecycleModule.register(binder, QueryResource.class);
+
+            LifecycleModule.register(binder, Server.class);
+            DiscoveryModule.register(binder, Self.class);
+          }
+
+          @Provides
+          @ManageLifecycle
+          public ServerDiscoverySelector getCoordinatorServerDiscoverySelector(
+              TierConfig config,
+              ServerDiscoveryFactory factory
+          )
+          {
+            return factory.createSelector(config.getCoordinatorServiceName());
+          }
+        }
+    );
+  }
+}
@@ -52,7 +52,7 @@ public class Main
         .withCommands(
             CliCoordinator.class, CliHistorical.class, CliBroker.class,
             CliRealtime.class, CliOverlord.class, CliMiddleManager.class,
-            CliBridge.class
+            CliBridge.class, CliRouter.class
         );
 
     builder.withGroup("example")