Merge branch 'master' of github.com:metamx/druid

fjy 2014-03-12 10:34:31 -07:00
commit 516e855144
4 changed files with 48 additions and 9 deletions

View File: HadoopIndexTask.java

@@ -281,7 +281,7 @@ public class HadoopIndexTask extends AbstractTask
       Jobby job = new HadoopDruidDetermineConfigurationJob(config);

-      log.info("Starting a hadoop index generator job...");
+      log.info("Starting a hadoop determine configuration job...");
       if (job.run()) {
         return HadoopDruidIndexerConfig.jsonMapper.writeValueAsString(HadoopDruidIndexerConfigBuilder.toSchema(config));
       }

View File: MemcachedCache.java

@@ -32,6 +32,8 @@ import net.spy.memcached.FailureMode;
 import net.spy.memcached.MemcachedClient;
 import net.spy.memcached.MemcachedClientIF;
 import net.spy.memcached.internal.BulkFuture;
+import net.spy.memcached.ops.LinkedOperationQueueFactory;
+import net.spy.memcached.ops.OperationQueueFactory;
 import org.apache.commons.codec.digest.DigestUtils;

 import javax.annotation.Nullable;
@@ -56,7 +58,15 @@ public class MemcachedCache implements Cache
     // always use compression
     transcoder.setCompressionThreshold(0);

-    MemcachedOperationQueueFactory queueFactory = new MemcachedOperationQueueFactory(config.getMaxOperationQueueSize());
+    OperationQueueFactory opQueueFactory;
+    long maxQueueBytes = config.getMaxOperationQueueSize();
+    if(maxQueueBytes > 0) {
+      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
+    } else {
+      opQueueFactory = new LinkedOperationQueueFactory();
+    }
+
     return new MemcachedCache(
         new MemcachedClient(
             new ConnectionFactoryBuilder().setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
@@ -68,7 +78,8 @@ public class MemcachedCache implements Cache
                 .setShouldOptimize(true)
                 .setOpQueueMaxBlockTime(config.getTimeout())
                 .setOpTimeout(config.getTimeout())
-                .setOpQueueFactory(queueFactory)
+                .setReadBufferSize(config.getReadBufferSize())
+                .setOpQueueFactory(opQueueFactory)
                 .build(),
             AddrUtil.getAddresses(config.getHosts())
         ),
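
Aside (not part of the commit): the practical difference between the two branches above is what the factory's create() call hands back. A minimal sketch, assuming spymemcached's OperationQueueFactory contract; QueueFactoryDemo is a hypothetical name used only for illustration:

    import java.util.concurrent.BlockingQueue;

    import net.spy.memcached.ops.LinkedOperationQueueFactory;
    import net.spy.memcached.ops.Operation;
    import net.spy.memcached.ops.OperationQueueFactory;

    public class QueueFactoryDemo
    {
      public static void main(String[] args)
      {
        // maxOperationQueueSize == 0 now selects the spymemcached default:
        // an unbounded linked queue that grows without limit under load.
        OperationQueueFactory unbounded = new LinkedOperationQueueFactory();
        BlockingQueue<Operation> queue = unbounded.create();
        System.out.println("remainingCapacity: " + queue.remainingCapacity());

        // A positive maxOperationQueueSize selects Druid's
        // MemcachedOperationQueueFactory instead, which caps the queue by the
        // total bytes of pending operations, so a slow memcached node
        // back-pressures writers rather than exhausting the heap.
      }
    }

With the new default of 0 (see the config change below), the unbounded path is taken unless an operator opts into a byte cap.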

View File: MemcachedCacheConfig.java

@@ -20,24 +20,38 @@
 package io.druid.client.cache;

 import com.fasterxml.jackson.annotation.JsonProperty;
+import net.spy.memcached.DefaultConnectionFactory;

 import javax.validation.constraints.NotNull;

 public class MemcachedCacheConfig
 {
+  // default to 30-day expiration for cache entries
+  // (values greater than 30 days are interpreted by memcached as absolute POSIX timestamps instead of durations)
   @JsonProperty
-  private int expiration = 2592000; // What is this number?
+  private int expiration = 30 * 24 * 3600;

   @JsonProperty
   private int timeout = 500;

+  // comma-delimited list of memcached servers, given as host:port pairs
   @JsonProperty
   @NotNull
   private String hosts;

   @JsonProperty
   private int maxObjectSize = 50 * 1024 * 1024;

+  // memcached client read buffer size; -1 uses the spymemcached library default
+  @JsonProperty
+  private int readBufferSize = DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE;
+
   @JsonProperty
   private String memcachedPrefix = "druid";

+  // maximum size in bytes of the memcached client operation queue; 0 means unbounded
   @JsonProperty
-  private long maxOperationQueueSize = 256 * 1024 * 1024L; // 256 MB
+  private long maxOperationQueueSize = 0;

   public int getExpiration()
   {
@@ -68,4 +82,9 @@ public class MemcachedCacheConfig
   {
     return maxOperationQueueSize;
   }
+
+  public int getReadBufferSize()
+  {
+    return readBufferSize;
+  }
 }
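
The new comment on expiration points at a memcached protocol rule that is easy to trip over: expiration values up to 30 days (2592000 seconds) are relative TTLs, while anything larger is read as an absolute POSIX timestamp. A small self-contained illustration (ExpirationDemo is a hypothetical class, not part of the commit):

    public class ExpirationDemo
    {
      public static void main(String[] args)
      {
        final int THIRTY_DAYS = 30 * 24 * 3600; // 2592000 seconds, the new default

        // Up to 30 days, memcached treats the value as "seconds from now".
        System.out.println(THIRTY_DAYS + " -> expires 30 days from now");

        // One second more and memcached reads it as a POSIX timestamp:
        // 2592001 is 1970-01-31T00:00:01Z, so the entry expires immediately.
        System.out.println((THIRTY_DAYS + 1) + " -> already in the past");
      }
    }

Writing the default as 30 * 24 * 3600 rather than the bare 2592000 also answers the old "// What is this number?" comment directly.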

View File: CliInternalHadoopIndexer.java

@@ -20,14 +20,20 @@
 package io.druid.cli;

 import com.google.api.client.repackaged.com.google.common.base.Throwables;
+import com.google.api.client.util.Lists;
 import com.metamx.common.logger.Logger;
 import io.airlift.command.Arguments;
 import io.airlift.command.Command;
+import io.druid.indexer.HadoopDruidDetermineConfigurationJob;
 import io.druid.indexer.HadoopDruidIndexerConfig;
 import io.druid.indexer.HadoopDruidIndexerConfigBuilder;
 import io.druid.indexer.HadoopDruidIndexerJob;
+import io.druid.indexer.JobHelper;
+import io.druid.indexer.Jobby;

 import java.io.File;
+import java.util.ArrayList;
+import java.util.List;

 /**
  */
@@ -37,17 +43,20 @@ import java.io.File;
 )
 public class CliInternalHadoopIndexer implements Runnable
 {
+  private static final Logger log = new Logger(CliHadoopIndexer.class);
+
   @Arguments(description = "A JSON object or the path to a file that contains a JSON object", required = true)
   private String argumentSpec;

-  private static final Logger log = new Logger(CliHadoopIndexer.class);
-
   @Override
   public void run()
   {
     try {
-      final HadoopDruidIndexerJob job = new HadoopDruidIndexerJob(getHadoopDruidIndexerConfig());
-      job.run();
+      HadoopDruidIndexerConfig config = getHadoopDruidIndexerConfig();
+      List<Jobby> jobs = Lists.newArrayList();
+      jobs.add(new HadoopDruidDetermineConfigurationJob(config));
+      jobs.add(new HadoopDruidIndexerJob(config));
+      JobHelper.runJobs(jobs, config);
     }
     catch (Exception e) {
       throw Throwables.propagate(e);
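
JobHelper.runJobs itself is not shown in this diff. Given only that Jobby exposes boolean run() (visible in the HadoopIndexTask hunk above), a plausible fail-fast chaining looks like the sketch below; the real helper also takes the indexer config, presumably for working-path bookkeeping, which this hypothetical stand-in omits:

    import java.util.List;

    import io.druid.indexer.Jobby;

    // Hypothetical stand-in for io.druid.indexer.JobHelper.runJobs: run each
    // Jobby in order and stop at the first failure, so a failed
    // determine-configuration job aborts indexing before any data is written.
    public class JobChain
    {
      public static boolean runJobs(List<Jobby> jobs)
      {
        for (Jobby job : jobs) {
          if (!job.run()) {
            return false;
          }
        }
        return true;
      }
    }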