mirror of https://github.com/apache/druid.git

File.deleteOnExit() (#3923)

* Less use of File.deleteOnExit()
* Removed deleteOnExit from most of the tests/benchmarks/IOPeon
* Made IOPeon closeable
* Formatting.
* Revert DeterminePartitionsJobTest, remove cleanup method from IOPeon

This commit is contained in:
parent 9dfcf0763a
commit 8854ce018e
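The recurring pattern in this change: the benchmarks stop relying on File.deleteOnExit() for temporary directories and instead keep the directory in a field and delete it explicitly in a JMH @TearDown method. A minimal sketch of that pattern, with illustrative names, assuming Guava and commons-io on the classpath as in the diff below:

import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;

import java.io.File;
import java.io.IOException;

@State(Scope.Benchmark)
public class ExampleBenchmarkState
{
  // Keep the temp dir in a field so tearDown() can delete it deterministically,
  // instead of registering it with File.deleteOnExit().
  private File tmpDir;

  @Setup
  public void setup()
  {
    tmpDir = Files.createTempDir();
    // ... persist indexes or other benchmark fixtures into tmpDir ...
  }

  @TearDown
  public void tearDown() throws IOException
  {
    // Recursively removes the directory when the benchmark trial ends.
    FileUtils.deleteDirectory(tmpDir);
  }
}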
@@ -26,7 +26,6 @@ import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -76,6 +75,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.joda.time.Interval;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -87,6 +87,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -118,6 +119,7 @@ public class FilterPartitionBenchmark
   private IncrementalIndex incIndex;
   private QueryableIndex qIndex;
   private File indexFile;
+  private File tmpDir;
 
   private Filter timeFilterNone;
   private Filter timeFilterHalf;
@@ -172,13 +174,12 @@ public class FilterPartitionBenchmark
       incIndex.add(row);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     indexFile = INDEX_MERGER_V9.persist(
         incIndex,
-        tmpFile,
+        tmpDir,
         new IndexSpec()
     );
     qIndex = INDEX_IO.loadIndex(indexFile);
@@ -219,6 +220,12 @@ public class FilterPartitionBenchmark
     ));
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -77,6 +76,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -87,6 +87,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -124,6 +125,7 @@ public class FilteredAggregatorBenchmark
   private QueryRunnerFactory factory;
   private BenchmarkSchemaInfo schemaInfo;
   private TimeseriesQuery query;
+  private File tmpDir;
 
   private static String JS_FN = "function(str) { return 'super-' + str; }";
   private static ExtractionFn JS_EXTRACTION_FN = new JavaScriptExtractionFn(JS_FN, false, JavaScriptConfig.getEnabledInstance());
@@ -187,13 +189,12 @@ public class FilteredAggregatorBenchmark
       inputRows.add(row);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     indexFile = INDEX_MERGER_V9.persist(
         incIndex,
-        tmpFile,
+        tmpDir,
         new IndexSpec()
     );
     qIndex = INDEX_IO.loadIndex(indexFile);
@@ -220,6 +221,12 @@ public class FilteredAggregatorBenchmark
         .build();
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex(AggregatorFactory[] metrics)
   {
     return new OnheapIncrementalIndex(
@@ -184,7 +184,7 @@ public class FloatCompressionBenchmarkFileGenerator
         output.write(ByteBuffer.wrap(baos.toByteArray()));
       }
       finally {
-        iopeon.cleanup();
+        iopeon.close();
         br.close();
       }
       System.out.print(compFile.length() / 1024 + "\n");
@@ -177,7 +177,7 @@ public class LongCompressionBenchmarkFileGenerator
         output.write(ByteBuffer.wrap(baos.toByteArray()));
       }
       finally {
-        iopeon.cleanup();
+        iopeon.close();
         br.close();
       }
       System.out.print(compFile.length() / 1024 + "\n");
@@ -22,7 +22,6 @@ package io.druid.benchmark.indexing;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -42,6 +41,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -52,6 +52,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -88,6 +89,7 @@ public class IndexMergeBenchmark
 
   private List<QueryableIndex> indexesToMerge;
   private BenchmarkSchemaInfo schemaInfo;
+  private File tmpDir;
 
   static {
     JSON_MAPPER = new DefaultObjectMapper();
@@ -137,13 +139,12 @@ public class IndexMergeBenchmark
       incIndex.add(row);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     File indexFile = INDEX_MERGER_V9.persist(
         incIndex,
-        tmpFile,
+        tmpDir,
         new IndexSpec()
     );
 
@@ -152,6 +153,12 @@ public class IndexMergeBenchmark
     }
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -176,16 +183,25 @@ public class IndexMergeBenchmark
     File tmpFile = File.createTempFile("IndexMergeBenchmark-MERGEDFILE-" + System.currentTimeMillis(), ".TEMPFILE");
     tmpFile.delete();
     tmpFile.mkdirs();
+    try {
     log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
-    tmpFile.deleteOnExit();
 
-    File mergedFile = INDEX_MERGER.mergeQueryableIndex(indexesToMerge, rollup, schemaInfo.getAggsArray(), tmpFile, new IndexSpec());
+    File mergedFile = INDEX_MERGER.mergeQueryableIndex(
+        indexesToMerge,
+        rollup,
+        schemaInfo.getAggsArray(),
+        tmpFile,
+        new IndexSpec()
+    );
 
     blackhole.consume(mergedFile);
+    }
+    finally {
     tmpFile.delete();
     }
 
+  }
 
   @Benchmark
   @BenchmarkMode(Mode.AverageTime)
   @OutputTimeUnit(TimeUnit.MICROSECONDS)
@@ -194,13 +210,23 @@ public class IndexMergeBenchmark
     File tmpFile = File.createTempFile("IndexMergeBenchmark-MERGEDFILE-V9-" + System.currentTimeMillis(), ".TEMPFILE");
     tmpFile.delete();
     tmpFile.mkdirs();
+    try {
     log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
-    tmpFile.deleteOnExit();
 
-    File mergedFile = INDEX_MERGER_V9.mergeQueryableIndex(indexesToMerge, rollup, schemaInfo.getAggsArray(), tmpFile, new IndexSpec());
+    File mergedFile = INDEX_MERGER_V9.mergeQueryableIndex(
+        indexesToMerge,
+        rollup,
+        schemaInfo.getAggsArray(),
+        tmpFile,
+        new IndexSpec()
+    );
 
     blackhole.consume(mergedFile);
+    }
+    finally {
     tmpFile.delete();
+
+    }
 
   }
 }
@@ -22,7 +22,6 @@ package io.druid.benchmark.indexing;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -41,6 +40,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -174,19 +174,21 @@ public class IndexPersistBenchmark
   @OutputTimeUnit(TimeUnit.MICROSECONDS)
   public void persist(Blackhole blackhole) throws Exception
   {
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    File tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
+    try {
 
     File indexFile = INDEX_MERGER.persist(
         incIndex,
-        tmpFile,
+        tmpDir,
         new IndexSpec()
     );
 
     blackhole.consume(indexFile);
+    }
+    finally {
+      FileUtils.deleteDirectory(tmpDir);
+    }
-
-    tmpFile.delete();
   }
 
   @Benchmark
@@ -194,18 +196,20 @@ public class IndexPersistBenchmark
   @OutputTimeUnit(TimeUnit.MICROSECONDS)
   public void persistV9(Blackhole blackhole) throws Exception
   {
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();;
+    File tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
+    try {
 
     File indexFile = INDEX_MERGER_V9.persist(
         incIndex,
-        tmpFile,
+        tmpDir,
        new IndexSpec()
     );
 
     blackhole.consume(indexFile);
-
-    tmpFile.delete();
+    }
+    finally {
+      FileUtils.deleteDirectory(tmpDir);
+    }
   }
 }
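Unlike the benchmarks above, IndexPersistBenchmark creates a fresh temp directory inside each measured invocation, so its cleanup moves into a try/finally around the benchmark body rather than a @TearDown method. A minimal sketch of that shape, with illustrative names not taken from this diff:

import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;

import java.io.File;
import java.io.IOException;

public class PerInvocationTempDirExample
{
  // Hypothetical stand-in for the persist/merge call being benchmarked.
  static File doWork(File dir) throws IOException
  {
    return new File(dir, "result");
  }

  public static void run() throws IOException
  {
    File tmpDir = Files.createTempDir();
    try {
      File result = doWork(tmpDir);
      // In a real JMH benchmark, pass result to Blackhole.consume(...)
      // to defeat dead-code elimination.
    }
    finally {
      // Delete immediately; deleteOnExit() would only clean up at JVM shutdown
      // and leaks one directory per invocation until then.
      FileUtils.deleteDirectory(tmpDir);
    }
  }
}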
@@ -79,6 +79,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -89,6 +90,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -132,6 +134,7 @@ public class SearchBenchmark
   private BenchmarkSchemaInfo schemaInfo;
   private Druids.SearchQueryBuilder queryBuilder;
   private SearchQuery query;
+  private File tmpDir;
 
   private ExecutorService executorService;
 
@@ -351,15 +354,14 @@ public class SearchBenchmark
       incIndexes.add(incIndex);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();
     for (int i = 0; i < numSegments; i++) {
       File indexFile = INDEX_MERGER_V9.persist(
           incIndexes.get(i),
-          tmpFile,
+          tmpDir,
           new IndexSpec()
       );
 
@@ -378,6 +380,12 @@ public class SearchBenchmark
     );
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -67,6 +67,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -77,6 +78,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -123,6 +125,7 @@ public class SelectBenchmark
   private BenchmarkSchemaInfo schemaInfo;
   private Druids.SelectQueryBuilder queryBuilder;
   private SelectQuery query;
+  private File tmpDir;
 
   private ExecutorService executorService;
 
@@ -211,15 +214,14 @@ public class SelectBenchmark
       incIndexes.add(incIndex);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();
     for (int i = 0; i < numSegments; i++) {
       File indexFile = INDEX_MERGER_V9.persist(
           incIndexes.get(i),
-          tmpFile,
+          tmpDir,
           new IndexSpec()
       );
       QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
@@ -236,6 +238,12 @@ public class SelectBenchmark
     );
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -74,6 +73,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.joda.time.Interval;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -85,6 +85,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -121,6 +122,7 @@ public class TimeseriesBenchmark
 
   private List<IncrementalIndex> incIndexes;
   private List<QueryableIndex> qIndexes;
+  private File tmpDir;
 
   private QueryRunnerFactory factory;
   private BenchmarkSchemaInfo schemaInfo;
@@ -278,15 +280,14 @@ public class TimeseriesBenchmark
       incIndexes.add(incIndex);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
       File indexFile = INDEX_MERGER_V9.persist(
           incIndexes.get(i),
-          tmpFile,
+          tmpDir,
           new IndexSpec()
       );
 
@@ -303,6 +304,12 @@ public class TimeseriesBenchmark
     );
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.hash.Hashing;
 import com.google.common.io.Files;
-
 import io.druid.benchmark.datagen.BenchmarkDataGenerator;
 import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -72,6 +71,7 @@ import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
 import io.druid.segment.serde.ComplexMetrics;
+import org.apache.commons.io.FileUtils;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -82,6 +82,7 @@ import org.openjdk.jmh.annotations.Param;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
@@ -126,6 +127,7 @@ public class TopNBenchmark
   private BenchmarkSchemaInfo schemaInfo;
   private TopNQueryBuilder queryBuilder;
   private TopNQuery query;
+  private File tmpDir;
 
   private ExecutorService executorService;
 
@@ -255,15 +257,14 @@ public class TopNBenchmark
       incIndexes.add(incIndex);
     }
 
-    File tmpFile = Files.createTempDir();
-    log.info("Using temp dir: " + tmpFile.getAbsolutePath());
-    tmpFile.deleteOnExit();
+    tmpDir = Files.createTempDir();
+    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();
     for (int i = 0; i < numSegments; i++) {
       File indexFile = INDEX_MERGER_V9.persist(
           incIndexes.get(i),
-          tmpFile,
+          tmpDir,
           new IndexSpec()
       );
 
@@ -283,6 +284,12 @@ public class TopNBenchmark
     );
   }
 
+  @TearDown
+  public void tearDown() throws IOException
+  {
+    FileUtils.deleteDirectory(tmpDir);
+  }
+
   private IncrementalIndex makeIncIndex()
   {
     return new OnheapIncrementalIndex(
@@ -21,7 +21,6 @@ package io.druid.storage.azure;
 
 import com.google.common.collect.ImmutableMap;
 import com.microsoft.azure.storage.StorageException;
-
 import io.druid.java.util.common.FileUtils;
 import io.druid.segment.loading.SegmentLoadingException;
 import io.druid.timeline.DataSegment;
@@ -46,7 +45,6 @@ import static org.junit.Assert.assertTrue;
 public class AzureDataSegmentPullerTest extends EasyMockSupport
 {
 
-  private AzureStorage azureStorage;
   private static final String SEGMENT_FILE_NAME = "segment";
   private static final String containerName = "container";
   private static final String blobPath = "/path/to/storage/index.zip";
@@ -61,6 +59,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
       0,
       1
   );
+  private AzureStorage azureStorage;
 
   @Before
   public void before()
@@ -73,9 +72,8 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
   {
     final String value = "bucket";
     final File pulledFile = AzureTestUtils.createZipTempFile(SEGMENT_FILE_NAME, value);
-    pulledFile.deleteOnExit();
     final File toDir = Files.createTempDirectory("druid").toFile();
-    toDir.deleteOnExit();
+    try {
       final InputStream zipStream = new FileInputStream(pulledFile);
 
       expect(azureStorage.getBlobInputStream(containerName, blobPath)).andReturn(zipStream);
@@ -93,6 +91,11 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
 
       verifyAll();
     }
+    finally {
+      pulledFile.delete();
+      org.apache.commons.io.FileUtils.deleteDirectory(toDir);
+    }
+  }
 
   @Test(expected = RuntimeException.class)
   public void testDeleteOutputDirectoryWhenErrorIsRaisedPullingSegmentFiles()
@@ -100,8 +103,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
   {
 
     final File outDir = Files.createTempDirectory("druid").toFile();
-    outDir.deleteOnExit();
-
+    try {
       expect(azureStorage.getBlobInputStream(containerName, blobPath)).andThrow(
           new StorageException(
               "error",
@@ -121,6 +123,10 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
     assertFalse(outDir.exists());
 
     verifyAll();
+    }
+    finally {
+      org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+    }
 
   }
 
@@ -128,6 +134,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
   public void getSegmentFilesTest() throws SegmentLoadingException
   {
     final File outDir = new File("");
+    try {
       final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
       final AzureDataSegmentPuller puller = createMockBuilder(AzureDataSegmentPuller.class).withConstructor(
           azureStorage
@@ -140,6 +147,10 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
     puller.getSegmentFiles(dataSegment, outDir);
 
     verifyAll();
+    }
+    finally {
+      outDir.delete();
+    }
 
   }
 
@@ -57,7 +57,7 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
       throws IOException, SegmentLoadingException
   {
     final File outDir = Files.createTempDirectory("druid").toFile();
-    outDir.deleteOnExit();
+    try {
       GoogleStorage storage = createMock(GoogleStorage.class);
 
       expect(storage.get(bucket, path)).andThrow(new IOException(""));
@@ -71,11 +71,16 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
 
       verifyAll();
     }
+    finally {
+      org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+    }
+  }
 
   @Test
-  public void getSegmentFilesTest() throws SegmentLoadingException
+  public void getSegmentFilesTest() throws SegmentLoadingException, IOException
   {
     final File outDir = new File("");
+    try {
       final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
       GoogleStorage storage = createMock(GoogleStorage.class);
       GoogleDataSegmentPuller puller = createMockBuilder(GoogleDataSegmentPuller.class).withConstructor(
@@ -90,6 +95,10 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
 
     verifyAll();
     }
+    finally {
+      org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+    }
+  }
 
   @Test
   public void prepareOutDirTest() throws IOException
@@ -104,7 +113,7 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
       assertTrue(outDir.exists());
     }
     finally {
-      outDir.delete();
+      org.apache.commons.io.FileUtils.deleteDirectory(outDir);
     }
   }
 }
@@ -27,6 +27,7 @@ import io.druid.jackson.DefaultObjectMapper;
 import io.druid.storage.hdfs.HdfsDataSegmentFinder;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.NumberedShardSpec;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -133,7 +134,6 @@ public class HdfsDataSegmentFinderTest
     mapper.registerSubtypes(new NamedType(NumberedShardSpec.class, "numbered"));
 
     hdfsTmpDir = File.createTempFile("hdfsDataSource", "dir");
-    hdfsTmpDir.deleteOnExit();
     if (!hdfsTmpDir.delete()) {
       throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
     }
@@ -145,11 +145,12 @@ public class HdfsDataSegmentFinderTest
   }
 
   @AfterClass
-  public static void tearDownStatic()
+  public static void tearDownStatic() throws IOException
   {
     if (miniCluster != null) {
       miniCluster.shutdown(true);
     }
+    FileUtils.deleteDirectory(hdfsTmpDir);
   }
 
   @Before
@@ -24,6 +24,7 @@ import com.google.common.io.ByteStreams;
 import io.druid.java.util.common.CompressionUtils;
 import io.druid.java.util.common.StringUtils;
 import io.druid.storage.hdfs.HdfsDataSegmentPuller;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -62,7 +63,6 @@ public class HdfsDataSegmentPullerTest
   public static void setupStatic() throws IOException, ClassNotFoundException
   {
     hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
-    hdfsTmpDir.deleteOnExit();
     if (!hdfsTmpDir.delete()) {
       throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
     }
@@ -74,7 +74,6 @@ public class HdfsDataSegmentPullerTest
     final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
     tmpFile.delete();
     try {
-      tmpFile.deleteOnExit();
       Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
       try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
         Files.copy(tmpFile.toPath(), stream);
@@ -91,6 +90,7 @@ public class HdfsDataSegmentPullerTest
     if (miniCluster != null) {
       miniCluster.shutdown(true);
     }
+    FileUtils.deleteDirectory(hdfsTmpDir);
   }
 
 
@@ -112,18 +112,14 @@ public class HdfsDataSegmentPullerTest
   public void testZip() throws IOException, SegmentLoadingException
   {
     final File tmpDir = com.google.common.io.Files.createTempDir();
-    tmpDir.deleteOnExit();
     final File tmpFile = File.createTempFile("zipContents", ".txt", tmpDir);
-    tmpFile.deleteOnExit();
 
     final Path zipPath = new Path("/tmp/testZip.zip");
 
     final File outTmpDir = com.google.common.io.Files.createTempDir();
-    outTmpDir.deleteOnExit();
 
     final URI uri = URI.create(uriBase.toString() + zipPath.toString());
 
-    tmpFile.deleteOnExit();
     try (final OutputStream stream = new FileOutputStream(tmpFile)) {
       ByteStreams.copy(new ByteArrayInputStream(pathByteContents), stream);
     }
@@ -164,7 +160,6 @@ public class HdfsDataSegmentPullerTest
     final Path zipPath = new Path("/tmp/testZip.gz");
 
     final File outTmpDir = com.google.common.io.Files.createTempDir();
-    outTmpDir.deleteOnExit();
     final File outFile = new File(outTmpDir, "testZip");
     outFile.delete();
 
@@ -201,7 +196,6 @@ public class HdfsDataSegmentPullerTest
     final Path zipPath = new Path(perTestPath, "test.txt");
 
     final File outTmpDir = com.google.common.io.Files.createTempDir();
-    outTmpDir.deleteOnExit();
     final File outFile = new File(outTmpDir, "test.txt");
     outFile.delete();
 
@@ -23,6 +23,7 @@ import com.google.common.io.ByteStreams;
 
 import io.druid.java.util.common.StringUtils;
 import io.druid.storage.hdfs.HdfsFileTimestampVersionFinder;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -58,7 +59,6 @@ public class HdfsFileTimestampVersionFinderTest
   public static void setupStatic() throws IOException, ClassNotFoundException
   {
     hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
-    hdfsTmpDir.deleteOnExit();
     if (!hdfsTmpDir.delete()) {
       throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
     }
@@ -70,7 +70,6 @@ public class HdfsFileTimestampVersionFinderTest
     final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
     tmpFile.delete();
     try {
-      tmpFile.deleteOnExit();
       Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
       try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
         Files.copy(tmpFile.toPath(), stream);
@@ -87,6 +86,7 @@ public class HdfsFileTimestampVersionFinderTest
     if (miniCluster != null) {
       miniCluster.shutdown(true);
     }
+    FileUtils.deleteDirectory(hdfsTmpDir);
   }
 
 
@@ -55,7 +55,7 @@ class HadoopIOPeon implements IOPeon
   }
 
   @Override
-  public void cleanup() throws IOException
+  public void close() throws IOException
   {
     throw new UnsupportedOperationException();
   }
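Renaming cleanup() to close() is what lets an IOPeon be treated as a java.io.Closeable, so callers can scope temp-file cleanup with try-with-resources instead of deleteOnExit(). A hedged sketch of that calling pattern; the ScratchFiles class below is illustrative and not part of the Druid API touched by this commit:

import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;

import java.io.Closeable;
import java.io.File;
import java.io.IOException;

// Illustrative Closeable holder for intermediate files, mirroring the shape
// an IOPeon takes once cleanup() becomes close().
final class ScratchFiles implements Closeable
{
  private final File dir = Files.createTempDir();

  File newScratchFile(String name)
  {
    return new File(dir, name);
  }

  @Override
  public void close() throws IOException
  {
    // Deterministic cleanup, no JVM shutdown hook involved.
    FileUtils.deleteDirectory(dir);
  }
}

class ScratchFilesDemo
{
  static void demo() throws IOException
  {
    try (ScratchFiles scratch = new ScratchFiles()) {
      File f = scratch.newScratchFile("dim_values.tmp");
      // ... write intermediate data to f ...
    } // close() deletes everything here
  }
}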
@@ -33,6 +33,7 @@ import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.DoubleSumAggregatorFactory;
 import io.druid.segment.indexing.DataSchema;
 import io.druid.segment.indexing.granularity.UniformGranularitySpec;
+import org.apache.commons.io.FileUtils;
 import org.joda.time.Interval;
 import org.junit.Assert;
 import org.junit.Test;
@@ -95,13 +96,21 @@ public class DetermineHashedPartitionsJobTest
     );
   }
 
-  public DetermineHashedPartitionsJobTest(String dataFilePath, long targetPartitionSize, String interval, int errorMargin, int expectedNumTimeBuckets, int[] expectedNumOfShards) throws IOException
+  public DetermineHashedPartitionsJobTest(
+      String dataFilePath,
+      long targetPartitionSize,
+      String interval,
+      int errorMargin,
+      int expectedNumTimeBuckets,
+      int[] expectedNumOfShards
+  ) throws IOException
   {
     this.expectedNumOfShards = expectedNumOfShards;
     this.expectedNumTimeBuckets = expectedNumTimeBuckets;
     this.errorMargin = errorMargin;
     File tmpDir = Files.createTempDir();
-    tmpDir.deleteOnExit();
+    try {
 
     HadoopIngestionSpec ingestionSpec = new HadoopIngestionSpec(
         new DataSchema(
@@ -111,7 +120,12 @@ public class DetermineHashedPartitionsJobTest
             new DelimitedParseSpec(
                 new TimestampSpec("ts", null, null),
                 new DimensionsSpec(
-                    DimensionsSpec.getDefaultSchemas(ImmutableList.of("market", "quality", "placement", "placementish")),
+                    DimensionsSpec.getDefaultSchemas(ImmutableList.of(
+                        "market",
+                        "quality",
+                        "placement",
+                        "placementish"
+                    )),
                     null,
                     null
                 ),
@@ -169,6 +183,10 @@ public class DetermineHashedPartitionsJobTest
     );
     this.indexerConfig = new HadoopDruidIndexerConfig(ingestionSpec);
     }
+    finally {
+      FileUtils.deleteDirectory(tmpDir);
+    }
+  }
 
   @Test
   public void testDetermineHashedPartitions(){
@@ -69,8 +69,8 @@ public class HadoopIOPeonTest
     Assert.assertNotNull(ioPeon.makeInputStream(tmpFolder.newFile(TMP_FILE_NAME).getName()));
   }
 
-  @Test(expected = UnsupportedOperationException.class) public void testCleanup() throws IOException
+  @Test(expected = UnsupportedOperationException.class) public void testClose() throws IOException
   {
-    ioPeon.cleanup();
+    ioPeon.close();
   }
 }
@@ -26,6 +26,7 @@ import com.google.common.util.concurrent.MoreExecutors;
 import io.druid.common.utils.UUIDUtils;
 import io.druid.java.util.common.StringUtils;
 import junit.framework.Assert;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -71,7 +72,6 @@ public class HdfsClasspathSetupTest
   public static void setupStatic() throws IOException, ClassNotFoundException
   {
     hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
-    hdfsTmpDir.deleteOnExit();
     if (!hdfsTmpDir.delete()) {
       throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
     }
@@ -100,6 +100,7 @@ public class HdfsClasspathSetupTest
     if (miniCluster != null) {
       miniCluster.shutdown(true);
     }
+    FileUtils.deleteDirectory(hdfsTmpDir);
   }
 
   @After
@ -53,8 +53,6 @@ import java.util.zip.GZIPOutputStream;
|
||||||
|
|
||||||
public class CompressionUtilsTest
|
public class CompressionUtilsTest
|
||||||
{
|
{
|
||||||
@Rule
|
|
||||||
public final TemporaryFolder temporaryFolder = new TemporaryFolder();
|
|
||||||
private static final String content;
|
private static final String content;
|
||||||
private static final byte[] expected;
|
private static final byte[] expected;
|
||||||
private static final byte[] gzBytes;
|
private static final byte[] gzBytes;
|
||||||
|
@@ -85,9 +83,19 @@ public class CompressionUtilsTest
 gzBytes = gzByteStream.toByteArray();
 }
 
+@Rule
+public final TemporaryFolder temporaryFolder = new TemporaryFolder();
 private File testDir;
 private File testFile;
 
+public static void assertGoodDataStream(InputStream stream) throws IOException
+{
+try (final ByteArrayOutputStream bos = new ByteArrayOutputStream(expected.length)) {
+ByteStreams.copy(stream, bos);
+Assert.assertArrayEquals(expected, bos.toByteArray());
+}
+}
+
 @Before
 public void setUp() throws IOException
 {
@@ -99,14 +107,6 @@ public class CompressionUtilsTest
 Assert.assertTrue(testFile.getParentFile().equals(testDir));
 }
 
-public static void assertGoodDataStream(InputStream stream) throws IOException
-{
-try (final ByteArrayOutputStream bos = new ByteArrayOutputStream(expected.length)) {
-ByteStreams.copy(stream, bos);
-Assert.assertArrayEquals(expected, bos.toByteArray());
-}
-}
-
 @Test
 public void testGoodGzNameResolution()
 {
@@ -131,7 +131,7 @@ public class CompressionUtilsTest
 {
 final File tmpDir = temporaryFolder.newFolder("testGoodZipCompressUncompress");
 final File zipFile = new File(tmpDir, "compressionUtilTest.zip");
-zipFile.deleteOnExit();
+try {
 CompressionUtils.zip(testDir, zipFile);
 final File newDir = new File(tmpDir, "newDir");
 newDir.mkdir();
@@ -142,6 +142,15 @@ public class CompressionUtilsTest
 assertGoodDataStream(inputStream);
 }
 }
+finally {
+if (zipFile.exists()) {
+zipFile.delete();
+}
+if (tmpDir.exists()) {
+tmpDir.delete();
+}
+}
+}
 
 
 @Test
@@ -252,53 +261,6 @@ public class CompressionUtilsTest
 }
 }
 
-private static class ZeroRemainingInputStream extends FilterInputStream
-{
-private final AtomicInteger pos = new AtomicInteger(0);
-
-protected ZeroRemainingInputStream(InputStream in)
-{
-super(in);
-}
-
-@Override
-public synchronized void reset() throws IOException
-{
-super.reset();
-pos.set(0);
-}
-
-@Override
-public int read(byte b[]) throws IOException
-{
-final int len = Math.min(b.length, gzBytes.length - pos.get() % gzBytes.length);
-pos.addAndGet(len);
-return read(b, 0, len);
-}
-
-@Override
-public int read() throws IOException
-{
-pos.incrementAndGet();
-return super.read();
-}
-
-@Override
-public int read(byte b[], int off, int len) throws IOException
-{
-final int l = Math.min(len, gzBytes.length - pos.get() % gzBytes.length);
-pos.addAndGet(l);
-return super.read(b, off, l);
-}
-
-@Override
-public int available() throws IOException
-{
-return 0;
-}
-}
-
-
 @Test
 // Sanity check to make sure the test class works as expected
 public void testZeroRemainingInputStream() throws IOException
@@ -410,7 +372,6 @@ public class CompressionUtilsTest
 }
 }
 
-
 @Test
 // http://bugs.java.com/bugdatabase/view_bug.do?bug_id=7036144
 public void testGunzipBugStreamWorkarround() throws IOException
@@ -539,7 +500,6 @@ public class CompressionUtilsTest
 Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
 }
 
-
 @Test(expected = IOException.class)
 public void testStreamErrorGzip() throws Exception
 {
@@ -596,4 +556,50 @@ public class CompressionUtilsTest
 )
 );
 }
+
+private static class ZeroRemainingInputStream extends FilterInputStream
+{
+private final AtomicInteger pos = new AtomicInteger(0);
+
+protected ZeroRemainingInputStream(InputStream in)
+{
+super(in);
+}
+
+@Override
+public synchronized void reset() throws IOException
+{
+super.reset();
+pos.set(0);
+}
+
+@Override
+public int read(byte b[]) throws IOException
+{
+final int len = Math.min(b.length, gzBytes.length - pos.get() % gzBytes.length);
+pos.addAndGet(len);
+return read(b, 0, len);
+}
+
+@Override
+public int read() throws IOException
+{
+pos.incrementAndGet();
+return super.read();
+}
+
+@Override
+public int read(byte b[], int off, int len) throws IOException
+{
+final int l = Math.min(len, gzBytes.length - pos.get() % gzBytes.length);
+pos.addAndGet(l);
+return super.read(b, off, l);
+}
+
+@Override
+public int available() throws IOException
+{
+return 0;
+}
+}
 }
@@ -642,7 +642,7 @@ public class IndexMerger
 @Override
 public void close() throws IOException
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 });
 try {
@@ -135,7 +135,7 @@ public class IndexMergerV9 extends IndexMerger
 @Override
 public void close() throws IOException
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 });
 final FileSmoosher v9Smoosher = new FileSmoosher(outDir);
@@ -210,7 +210,7 @@ public class StringDimensionMergerLegacy extends StringDimensionMergerV9 impleme
 spatialWriter.close();
 serializerUtils.writeString(spatialIndexFile, dimensionName);
 ByteStreams.copy(spatialWriter.combineStreams(), spatialIndexFile);
-spatialIoPeon.cleanup();
+spatialIoPeon.close();
 }
 }
 
@@ -19,15 +19,15 @@
 
 package io.druid.segment.data;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
 /**
 */
-public interface IOPeon
+public interface IOPeon extends Closeable
 {
 public OutputStream makeOutputStream(String filename) throws IOException;
 public InputStream makeInputStream(String filename) throws IOException;
-public void cleanup() throws IOException;
 }
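
Since IOPeon now extends Closeable and cleanup() has been folded into close(), callers can scope a peon's temp files with try-with-resources instead of deleteOnExit(). A minimal usage sketch, assuming TmpFileIOPeon (shown in the next hunks) has a no-argument constructor and using a filename chosen purely for illustration:

    import io.druid.segment.data.IOPeon;
    import io.druid.segment.data.TmpFileIOPeon;

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    public class IOPeonUsageSketch
    {
      public static void main(String[] args) throws IOException
      {
        // Closing the peon deletes every temp file it created, so nothing is left
        // hanging around for File.deleteOnExit() to clean up at JVM shutdown.
        try (IOPeon ioPeon = new TmpFileIOPeon()) {      // assumed no-arg constructor
          try (OutputStream out = ioPeon.makeOutputStream("example.bin")) {
            out.write(new byte[]{1, 2, 3});
          }
          try (InputStream in = ioPeon.makeInputStream("example.bin")) {
            System.out.println("first byte: " + in.read());
          }
        }
      }
    }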
@@ -53,7 +53,6 @@ public class TmpFileIOPeon implements IOPeon
 File retFile = createdFiles.get(filename);
 if (retFile == null) {
 retFile = File.createTempFile("filePeon", filename);
-retFile.deleteOnExit();
 createdFiles.put(filename, retFile);
 return new BufferedOutputStream(new FileOutputStream(retFile));
 } else if (allowOverwrite) {
@@ -72,7 +71,7 @@ public class TmpFileIOPeon implements IOPeon
 }
 
 @Override
-public void cleanup() throws IOException
+public void close() throws IOException
 {
 for (File file : createdFiles.values()) {
 file.delete();
@@ -28,6 +28,7 @@ import io.druid.segment.column.Column;
 import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexAdapter;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
+import org.apache.commons.io.FileUtils;
 import org.joda.time.Interval;
 import org.junit.Assert;
 import org.junit.Test;
@@ -46,8 +47,8 @@ public class EmptyIndexTest
 if (!tmpDir.mkdir()) {
 throw new IllegalStateException("tmp mkdir failed");
 }
-tmpDir.deleteOnExit();
 
+try {
 IncrementalIndex emptyIndex = new OnheapIncrementalIndex(
 0,
 QueryGranularities.NONE,
@@ -72,6 +73,14 @@ public class EmptyIndexTest
 Assert.assertEquals("getDimensionNames", 0, Iterables.size(emptyQueryableIndex.getAvailableDimensions()));
 Assert.assertEquals("getMetricNames", 0, Iterables.size(emptyQueryableIndex.getColumnNames()));
 Assert.assertEquals("getDataInterval", new Interval("2012-08-01/P3D"), emptyQueryableIndex.getDataInterval());
-Assert.assertEquals("getReadOnlyTimestamps", 0, emptyQueryableIndex.getColumn(Column.TIME_COLUMN_NAME).getLength());
+Assert.assertEquals(
+"getReadOnlyTimestamps",
+0,
+emptyQueryableIndex.getColumn(Column.TIME_COLUMN_NAME).getLength()
+);
+}
+finally {
+FileUtils.deleteDirectory(tmpDir);
+}
 }
 }
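
The EmptyIndexTest change above wraps the body in try/finally so the temp directory is removed whether the assertions pass or fail; deleteOnExit() could not guarantee that, since it fires only at JVM exit and never removes a non-empty directory. A stripped-down sketch of the same pattern, with a placeholder directory name and a trivial stand-in for the real index-building work:

    import org.apache.commons.io.FileUtils;

    import java.io.File;
    import java.io.IOException;

    public class TryFinallyTempDirSketch
    {
      public static void main(String[] args) throws IOException
      {
        File tmpDir = new File(System.getProperty("java.io.tmpdir"), "emptyIndexSketch");
        if (!tmpDir.mkdir()) {
          throw new IllegalStateException("tmp mkdir failed");
        }
        try {
          // Stand-in for the real work (persisting and loading an index under tmpDir).
          FileUtils.writeStringToFile(new File(tmpDir, "segment"), "payload");
        }
        finally {
          // Runs on success and failure alike, and deletes the directory recursively.
          FileUtils.deleteDirectory(tmpDir);
        }
      }
    }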
@@ -46,6 +46,7 @@ import io.druid.query.timeseries.TimeseriesResultValue;
 import io.druid.segment.incremental.IncrementalIndex;
 import io.druid.segment.incremental.IncrementalIndexSchema;
 import io.druid.segment.incremental.OnheapIncrementalIndex;
+import org.apache.commons.io.FileUtils;
 import org.joda.time.DateTime;
 import org.joda.time.Interval;
 import org.junit.Test;
@@ -256,11 +257,15 @@ public class IndexMergerV9WithSpatialIndexTest
 File tmpFile = File.createTempFile("billy", "yay");
 tmpFile.delete();
 tmpFile.mkdirs();
-tmpFile.deleteOnExit();
 
+try {
 INDEX_MERGER_V9.persist(theIndex, tmpFile, indexSpec);
 return INDEX_IO.loadIndex(tmpFile);
 }
+finally {
+FileUtils.deleteDirectory(tmpFile);
+}
+}
 
 private static QueryableIndex makeMergedQueryableIndex(IndexSpec indexSpec)
 {
@@ -470,18 +475,15 @@ public class IndexMergerV9WithSpatialIndexTest
 File mergedFile = new File(tmpFile, "merged");
 
 firstFile.mkdirs();
-firstFile.deleteOnExit();
 secondFile.mkdirs();
-secondFile.deleteOnExit();
 thirdFile.mkdirs();
-thirdFile.deleteOnExit();
 mergedFile.mkdirs();
-mergedFile.deleteOnExit();
 
 INDEX_MERGER_V9.persist(first, DATA_INTERVAL, firstFile, indexSpec);
 INDEX_MERGER_V9.persist(second, DATA_INTERVAL, secondFile, indexSpec);
 INDEX_MERGER_V9.persist(third, DATA_INTERVAL, thirdFile, indexSpec);
 
+try {
 QueryableIndex mergedRealtime = INDEX_IO.loadIndex(
 INDEX_MERGER_V9.mergeQueryableIndex(
 Arrays.asList(
@@ -495,8 +497,16 @@ public class IndexMergerV9WithSpatialIndexTest
 indexSpec
 )
 );
 
 return mergedRealtime;
 
+}
+finally {
+FileUtils.deleteDirectory(firstFile);
+FileUtils.deleteDirectory(secondFile);
+FileUtils.deleteDirectory(thirdFile);
+FileUtils.deleteDirectory(mergedFile);
+}
+
 }
 catch (IOException e) {
 throw Throwables.propagate(e);
@@ -92,7 +92,7 @@ public class CompressedIntsIndexedWriterTest
 @After
 public void tearDown() throws Exception
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 
 private void generateVals(final int totalSize, final int maxValue) throws IOException
@@ -182,7 +182,7 @@ public class CompressedVSizeIndexedV3WriterTest
 @After
 public void tearDown() throws Exception
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 
 @Test
@@ -91,7 +91,7 @@ public class CompressedVSizeIntsIndexedWriterTest
 @After
 public void tearDown() throws Exception
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 
 private void generateVals(final int totalSize, final int maxValue) throws IOException
@@ -61,7 +61,7 @@ class IOPeonForTesting implements IOPeon
 }
 
 @Override
-public void cleanup() throws IOException
+public void close() throws IOException
 {
 outStreams.clear();
 }
@@ -50,7 +50,7 @@ public class VSizeIndexedIntsWriterTest
 @After
 public void tearDown() throws Exception
 {
-ioPeon.cleanup();
+ioPeon.close();
 }
 
 private void generateVals(final int totalSize, final int maxValue) throws IOException
@@ -20,9 +20,9 @@
 package io.druid.segment.loading;
 
 import com.google.common.io.Files;
-
 import io.druid.java.util.common.CompressionUtils;
-
+import org.apache.commons.io.FileUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -49,10 +49,15 @@ public class LocalDataSegmentPullerTest
 public void setup() throws IOException
 {
 tmpDir = temporaryFolder.newFolder();
-tmpDir.deleteOnExit();
 puller = new LocalDataSegmentPuller();
 }
 
+@After
+public void after() throws IOException
+{
+FileUtils.deleteDirectory(tmpDir);
+}
+
 @Test
 public void simpleZipTest() throws IOException, SegmentLoadingException
 {
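
LocalDataSegmentPullerTest replaces deleteOnExit() with an @After method, so each test's directory disappears as soon as that test finishes rather than at JVM shutdown. A sketch of the per-test lifecycle it adopts; the class name and the sample test are invented, while the @Rule/@Before/@After arrangement mirrors the diff:

    import org.apache.commons.io.FileUtils;
    import org.junit.After;
    import org.junit.Assert;
    import org.junit.Before;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    import java.io.File;
    import java.io.IOException;

    public class PerTestTempDirSketch
    {
      @Rule
      public final TemporaryFolder temporaryFolder = new TemporaryFolder();

      private File tmpDir;

      @Before
      public void setup() throws IOException
      {
        tmpDir = temporaryFolder.newFolder();
      }

      @After
      public void after() throws IOException
      {
        // Explicit, immediate cleanup; the TemporaryFolder rule would also delete the
        // folder, but the explicit call mirrors the pattern adopted in the diff above.
        FileUtils.deleteDirectory(tmpDir);
      }

      @Test
      public void writesIntoTempDir() throws IOException
      {
        File sample = new File(tmpDir, "sample.txt");
        FileUtils.writeStringToFile(sample, "hello");
        Assert.assertTrue(sample.exists());
      }
    }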
@@ -96,6 +96,7 @@ public class RealtimePlumberSchoolTest
 private DataSchema schema;
 private DataSchema schema2;
 private FireDepartmentMetrics metrics;
+private File tmpDir;
 
 public RealtimePlumberSchoolTest(RejectionPolicyFactory rejectionPolicy, boolean buildV9Directly)
 {
@@ -124,8 +125,7 @@ public class RealtimePlumberSchoolTest
 @Before
 public void setUp() throws Exception
 {
-final File tmpDir = Files.createTempDir();
-tmpDir.deleteOnExit();
+tmpDir = Files.createTempDir();
 
 ObjectMapper jsonMapper = new DefaultObjectMapper();
 
@@ -237,6 +237,7 @@ public class RealtimePlumberSchoolTest
 schema.getDataSource()
 )
 );
+FileUtils.deleteDirectory(tmpDir);
 }
 
 @Test(timeout = 60000)