File.deleteOnExit() (#3923)

* Less use of File.deleteOnExit()
 * Removed deleteOnExit() from most of the tests/benchmarks/IOPeon
 * Made IOPeon closeable

* Formatting.

* Revert DeterminePartitionsJobTest, remove cleanup method from IOPeon
Akash Dwivedi 2017-02-13 15:12:14 -08:00 committed by Gian Merlino
parent 9dfcf0763a
commit 8854ce018e
34 changed files with 476 additions and 338 deletions
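
The pattern this commit applies everywhere below: IOPeon extends java.io.Closeable, its cleanup() method becomes close(), and temp directories are removed eagerly in finally blocks or JMH @TearDown methods instead of being registered with File.deleteOnExit(), which keeps every registered path on the JVM's shutdown list until exit and never reclaims disk space in a long-running process. A minimal sketch of the resulting shape, condensed from the IOPeon and TmpFileIOPeon diffs below and omitting the allowOverwrite handling visible in the diff (not the complete Druid sources):

import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;

public interface IOPeon extends Closeable
{
  OutputStream makeOutputStream(String filename) throws IOException;

  InputStream makeInputStream(String filename) throws IOException;
}

class TmpFileIOPeon implements IOPeon
{
  private final Map<String, File> createdFiles = new HashMap<>();

  @Override
  public OutputStream makeOutputStream(String filename) throws IOException
  {
    File retFile = createdFiles.get(filename);
    if (retFile == null) {
      // No deleteOnExit() registration; close() removes the files
      // deterministically once the peon's owner is done with them.
      retFile = File.createTempFile("filePeon", filename);
      createdFiles.put(filename, retFile);
    }
    return new BufferedOutputStream(new FileOutputStream(retFile));
  }

  @Override
  public InputStream makeInputStream(String filename) throws IOException
  {
    final File retFile = createdFiles.get(filename);
    return retFile == null ? null : new FileInputStream(retFile);
  }

  @Override
  public void close() throws IOException
  {
    // Explicit, immediate cleanup of everything this peon created.
    for (File file : createdFiles.values()) {
      file.delete();
    }
    createdFiles.clear();
  }
}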

View File

@@ -26,7 +26,6 @@ import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -76,6 +75,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.joda.time.Interval;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -87,6 +87,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -118,6 +119,7 @@ public class FilterPartitionBenchmark
private IncrementalIndex incIndex;
private QueryableIndex qIndex;
private File indexFile;
private File tmpDir;
private Filter timeFilterNone;
private Filter timeFilterHalf;
@@ -172,13 +174,12 @@ public class FilterPartitionBenchmark
incIndex.add(row);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
indexFile = INDEX_MERGER_V9.persist(
incIndex,
tmpFile,
tmpDir,
new IndexSpec()
);
qIndex = INDEX_IO.loadIndex(indexFile);
@@ -219,6 +220,12 @@ public class FilterPartitionBenchmark
));
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(

View File

@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -77,6 +76,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -87,6 +87,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -124,6 +125,7 @@ public class FilteredAggregatorBenchmark
private QueryRunnerFactory factory;
private BenchmarkSchemaInfo schemaInfo;
private TimeseriesQuery query;
private File tmpDir;
private static String JS_FN = "function(str) { return 'super-' + str; }";
private static ExtractionFn JS_EXTRACTION_FN = new JavaScriptExtractionFn(JS_FN, false, JavaScriptConfig.getEnabledInstance());
@@ -187,13 +189,12 @@ public class FilteredAggregatorBenchmark
inputRows.add(row);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
indexFile = INDEX_MERGER_V9.persist(
incIndex,
tmpFile,
tmpDir,
new IndexSpec()
);
qIndex = INDEX_IO.loadIndex(indexFile);
@@ -220,6 +221,12 @@ public class FilteredAggregatorBenchmark
.build();
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex(AggregatorFactory[] metrics)
{
return new OnheapIncrementalIndex(

View File

@@ -184,7 +184,7 @@ public class FloatCompressionBenchmarkFileGenerator
output.write(ByteBuffer.wrap(baos.toByteArray()));
}
finally {
iopeon.cleanup();
iopeon.close();
br.close();
}
System.out.print(compFile.length() / 1024 + "\n");

View File

@@ -177,7 +177,7 @@ public class LongCompressionBenchmarkFileGenerator
output.write(ByteBuffer.wrap(baos.toByteArray()));
}
finally {
iopeon.cleanup();
iopeon.close();
br.close();
}
System.out.print(compFile.length() / 1024 + "\n");

View File

@@ -22,7 +22,6 @@ package io.druid.benchmark.indexing;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -42,6 +41,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -52,6 +52,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -88,6 +89,7 @@ public class IndexMergeBenchmark
private List<QueryableIndex> indexesToMerge;
private BenchmarkSchemaInfo schemaInfo;
private File tmpDir;
static {
JSON_MAPPER = new DefaultObjectMapper();
@@ -137,13 +139,12 @@ public class IndexMergeBenchmark
incIndex.add(row);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
File indexFile = INDEX_MERGER_V9.persist(
incIndex,
tmpFile,
tmpDir,
new IndexSpec()
);
@@ -152,6 +153,12 @@ public class IndexMergeBenchmark
}
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(
@@ -176,14 +183,23 @@ public class IndexMergeBenchmark
File tmpFile = File.createTempFile("IndexMergeBenchmark-MERGEDFILE-" + System.currentTimeMillis(), ".TEMPFILE");
tmpFile.delete();
tmpFile.mkdirs();
log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
tmpFile.deleteOnExit();
try {
log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
File mergedFile = INDEX_MERGER.mergeQueryableIndex(indexesToMerge, rollup, schemaInfo.getAggsArray(), tmpFile, new IndexSpec());
File mergedFile = INDEX_MERGER.mergeQueryableIndex(
indexesToMerge,
rollup,
schemaInfo.getAggsArray(),
tmpFile,
new IndexSpec()
);
blackhole.consume(mergedFile);
blackhole.consume(mergedFile);
}
finally {
tmpFile.delete();
}
tmpFile.delete();
}
@Benchmark
@@ -194,13 +210,23 @@ public class IndexMergeBenchmark
File tmpFile = File.createTempFile("IndexMergeBenchmark-MERGEDFILE-V9-" + System.currentTimeMillis(), ".TEMPFILE");
tmpFile.delete();
tmpFile.mkdirs();
log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
tmpFile.deleteOnExit();
try {
log.info(tmpFile.getAbsolutePath() + " isFile: " + tmpFile.isFile() + " isDir:" + tmpFile.isDirectory());
File mergedFile = INDEX_MERGER_V9.mergeQueryableIndex(indexesToMerge, rollup, schemaInfo.getAggsArray(), tmpFile, new IndexSpec());
File mergedFile = INDEX_MERGER_V9.mergeQueryableIndex(
indexesToMerge,
rollup,
schemaInfo.getAggsArray(),
tmpFile,
new IndexSpec()
);
blackhole.consume(mergedFile);
blackhole.consume(mergedFile);
}
finally {
tmpFile.delete();
}
tmpFile.delete();
}
}

View File

@@ -22,7 +22,6 @@ package io.druid.benchmark.indexing;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -41,6 +40,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -174,19 +174,21 @@ public class IndexPersistBenchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void persist(Blackhole blackhole) throws Exception
{
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
File tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
try {
File indexFile = INDEX_MERGER.persist(
incIndex,
tmpDir,
new IndexSpec()
);
File indexFile = INDEX_MERGER.persist(
incIndex,
tmpFile,
new IndexSpec()
);
blackhole.consume(indexFile);
}
finally {
FileUtils.deleteDirectory(tmpDir);
}
blackhole.consume(indexFile);
tmpFile.delete();
}
@Benchmark
@@ -194,18 +196,20 @@ public class IndexPersistBenchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void persistV9(Blackhole blackhole) throws Exception
{
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();;
File tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
try {
File indexFile = INDEX_MERGER_V9.persist(
incIndex,
tmpDir,
new IndexSpec()
);
File indexFile = INDEX_MERGER_V9.persist(
incIndex,
tmpFile,
new IndexSpec()
);
blackhole.consume(indexFile);
blackhole.consume(indexFile);
tmpFile.delete();
}
finally {
FileUtils.deleteDirectory(tmpDir);
}
}
}

View File

@@ -79,6 +79,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -89,6 +90,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -132,6 +134,7 @@ public class SearchBenchmark
private BenchmarkSchemaInfo schemaInfo;
private Druids.SearchQueryBuilder queryBuilder;
private SearchQuery query;
private File tmpDir;
private ExecutorService executorService;
@@ -351,15 +354,14 @@ public class SearchBenchmark
incIndexes.add(incIndex);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
qIndexes = new ArrayList<>();
for (int i = 0; i < numSegments; i++) {
File indexFile = INDEX_MERGER_V9.persist(
incIndexes.get(i),
tmpFile,
tmpDir,
new IndexSpec()
);
@@ -378,6 +380,12 @@ public class SearchBenchmark
);
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(

View File

@@ -67,6 +67,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -77,6 +78,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -123,6 +125,7 @@ public class SelectBenchmark
private BenchmarkSchemaInfo schemaInfo;
private Druids.SelectQueryBuilder queryBuilder;
private SelectQuery query;
private File tmpDir;
private ExecutorService executorService;
@@ -211,15 +214,14 @@ public class SelectBenchmark
incIndexes.add(incIndex);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
qIndexes = new ArrayList<>();
for (int i = 0; i < numSegments; i++) {
File indexFile = INDEX_MERGER_V9.persist(
incIndexes.get(i),
tmpFile,
tmpDir,
new IndexSpec()
);
QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
@@ -236,6 +238,12 @@ public class SelectBenchmark
);
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(

View File

@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -74,6 +73,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.joda.time.Interval;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -85,6 +85,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -121,6 +122,7 @@ public class TimeseriesBenchmark
private List<IncrementalIndex> incIndexes;
private List<QueryableIndex> qIndexes;
private File tmpDir;
private QueryRunnerFactory factory;
private BenchmarkSchemaInfo schemaInfo;
@@ -278,15 +280,14 @@ public class TimeseriesBenchmark
incIndexes.add(incIndex);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
qIndexes = new ArrayList<>();
for (int i = 0; i < numSegments; i++) {
File indexFile = INDEX_MERGER_V9.persist(
incIndexes.get(i),
tmpFile,
tmpDir,
new IndexSpec()
);
@@ -303,6 +304,12 @@ public class TimeseriesBenchmark
);
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(

View File

@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import io.druid.benchmark.datagen.BenchmarkDataGenerator;
import io.druid.benchmark.datagen.BenchmarkSchemaInfo;
import io.druid.benchmark.datagen.BenchmarkSchemas;
@@ -72,6 +71,7 @@ import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import org.apache.commons.io.FileUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
@@ -82,6 +82,7 @@ import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
@@ -126,6 +127,7 @@ public class TopNBenchmark
private BenchmarkSchemaInfo schemaInfo;
private TopNQueryBuilder queryBuilder;
private TopNQuery query;
private File tmpDir;
private ExecutorService executorService;
@@ -255,15 +257,14 @@ public class TopNBenchmark
incIndexes.add(incIndex);
}
File tmpFile = Files.createTempDir();
log.info("Using temp dir: " + tmpFile.getAbsolutePath());
tmpFile.deleteOnExit();
tmpDir = Files.createTempDir();
log.info("Using temp dir: " + tmpDir.getAbsolutePath());
qIndexes = new ArrayList<>();
for (int i = 0; i < numSegments; i++) {
File indexFile = INDEX_MERGER_V9.persist(
incIndexes.get(i),
tmpFile,
tmpDir,
new IndexSpec()
);
@@ -283,6 +284,12 @@ public class TopNBenchmark
);
}
@TearDown
public void tearDown() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
private IncrementalIndex makeIncIndex()
{
return new OnheapIncrementalIndex(

View File

@@ -21,7 +21,6 @@ package io.druid.storage.azure;
import com.google.common.collect.ImmutableMap;
import com.microsoft.azure.storage.StorageException;
import io.druid.java.util.common.FileUtils;
import io.druid.segment.loading.SegmentLoadingException;
import io.druid.timeline.DataSegment;
@@ -46,7 +45,6 @@ import static org.junit.Assert.assertTrue;
public class AzureDataSegmentPullerTest extends EasyMockSupport
{
private AzureStorage azureStorage;
private static final String SEGMENT_FILE_NAME = "segment";
private static final String containerName = "container";
private static final String blobPath = "/path/to/storage/index.zip";
@@ -61,6 +59,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
0,
1
);
private AzureStorage azureStorage;
@Before
public void before()
@@ -73,25 +72,29 @@
{
final String value = "bucket";
final File pulledFile = AzureTestUtils.createZipTempFile(SEGMENT_FILE_NAME, value);
pulledFile.deleteOnExit();
final File toDir = Files.createTempDirectory("druid").toFile();
toDir.deleteOnExit();
final InputStream zipStream = new FileInputStream(pulledFile);
try {
final InputStream zipStream = new FileInputStream(pulledFile);
expect(azureStorage.getBlobInputStream(containerName, blobPath)).andReturn(zipStream);
expect(azureStorage.getBlobInputStream(containerName, blobPath)).andReturn(zipStream);
replayAll();
replayAll();
AzureDataSegmentPuller puller = new AzureDataSegmentPuller(azureStorage);
AzureDataSegmentPuller puller = new AzureDataSegmentPuller(azureStorage);
FileUtils.FileCopyResult result = puller.getSegmentFiles(containerName, blobPath, toDir);
FileUtils.FileCopyResult result = puller.getSegmentFiles(containerName, blobPath, toDir);
File expected = new File(toDir, SEGMENT_FILE_NAME);
assertEquals(value.length(), result.size());
assertTrue(expected.exists());
assertEquals(value.length(), expected.length());
File expected = new File(toDir, SEGMENT_FILE_NAME);
assertEquals(value.length(), result.size());
assertTrue(expected.exists());
assertEquals(value.length(), expected.length());
verifyAll();
verifyAll();
}
finally {
pulledFile.delete();
org.apache.commons.io.FileUtils.deleteDirectory(toDir);
}
}
@Test(expected = RuntimeException.class)
@@ -100,27 +103,30 @@
{
final File outDir = Files.createTempDirectory("druid").toFile();
outDir.deleteOnExit();
try {
expect(azureStorage.getBlobInputStream(containerName, blobPath)).andThrow(
new StorageException(
"error",
"error",
404,
null,
null
)
);
expect(azureStorage.getBlobInputStream(containerName, blobPath)).andThrow(
new StorageException(
"error",
"error",
404,
null,
null
)
);
replayAll();
replayAll();
AzureDataSegmentPuller puller = new AzureDataSegmentPuller(azureStorage);
AzureDataSegmentPuller puller = new AzureDataSegmentPuller(azureStorage);
puller.getSegmentFiles(containerName, blobPath, outDir);
puller.getSegmentFiles(containerName, blobPath, outDir);
assertFalse(outDir.exists());
assertFalse(outDir.exists());
verifyAll();
verifyAll();
}
finally {
org.apache.commons.io.FileUtils.deleteDirectory(outDir);
}
}
@@ -128,18 +134,23 @@
public void getSegmentFilesTest() throws SegmentLoadingException
{
final File outDir = new File("");
final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
final AzureDataSegmentPuller puller = createMockBuilder(AzureDataSegmentPuller.class).withConstructor(
azureStorage
).addMockedMethod("getSegmentFiles", String.class, String.class, File.class).createMock();
try {
final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
final AzureDataSegmentPuller puller = createMockBuilder(AzureDataSegmentPuller.class).withConstructor(
azureStorage
).addMockedMethod("getSegmentFiles", String.class, String.class, File.class).createMock();
expect(puller.getSegmentFiles(containerName, blobPath, outDir)).andReturn(result);
expect(puller.getSegmentFiles(containerName, blobPath, outDir)).andReturn(result);
replayAll();
replayAll();
puller.getSegmentFiles(dataSegment, outDir);
puller.getSegmentFiles(dataSegment, outDir);
verifyAll();
verifyAll();
}
finally {
outDir.delete();
}
}

View File

@@ -57,38 +57,47 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
throws IOException, SegmentLoadingException
{
final File outDir = Files.createTempDirectory("druid").toFile();
outDir.deleteOnExit();
GoogleStorage storage = createMock(GoogleStorage.class);
try {
GoogleStorage storage = createMock(GoogleStorage.class);
expect(storage.get(bucket, path)).andThrow(new IOException(""));
expect(storage.get(bucket, path)).andThrow(new IOException(""));
replayAll();
replayAll();
GoogleDataSegmentPuller puller = new GoogleDataSegmentPuller(storage);
puller.getSegmentFiles(bucket, path, outDir);
GoogleDataSegmentPuller puller = new GoogleDataSegmentPuller(storage);
puller.getSegmentFiles(bucket, path, outDir);
assertFalse(outDir.exists());
assertFalse(outDir.exists());
verifyAll();
verifyAll();
}
finally {
org.apache.commons.io.FileUtils.deleteDirectory(outDir);
}
}
@Test
public void getSegmentFilesTest() throws SegmentLoadingException
public void getSegmentFilesTest() throws SegmentLoadingException, IOException
{
final File outDir = new File("");
final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
GoogleStorage storage = createMock(GoogleStorage.class);
GoogleDataSegmentPuller puller = createMockBuilder(GoogleDataSegmentPuller.class).withConstructor(
storage
).addMockedMethod("getSegmentFiles", String.class, String.class, File.class).createMock();
try {
final FileUtils.FileCopyResult result = createMock(FileUtils.FileCopyResult.class);
GoogleStorage storage = createMock(GoogleStorage.class);
GoogleDataSegmentPuller puller = createMockBuilder(GoogleDataSegmentPuller.class).withConstructor(
storage
).addMockedMethod("getSegmentFiles", String.class, String.class, File.class).createMock();
expect(puller.getSegmentFiles(bucket, path, outDir)).andReturn(result);
expect(puller.getSegmentFiles(bucket, path, outDir)).andReturn(result);
replayAll();
replayAll();
puller.getSegmentFiles(dataSegment, outDir);
puller.getSegmentFiles(dataSegment, outDir);
verifyAll();
verifyAll();
}
finally {
org.apache.commons.io.FileUtils.deleteDirectory(outDir);
}
}
@Test
@@ -104,7 +113,7 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
assertTrue(outDir.exists());
}
finally {
outDir.delete();
org.apache.commons.io.FileUtils.deleteDirectory(outDir);
}
}
}

View File

@@ -27,6 +27,7 @@ import io.druid.jackson.DefaultObjectMapper;
import io.druid.storage.hdfs.HdfsDataSegmentFinder;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.NumberedShardSpec;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -133,7 +134,6 @@ public class HdfsDataSegmentFinderTest
mapper.registerSubtypes(new NamedType(NumberedShardSpec.class, "numbered"));
hdfsTmpDir = File.createTempFile("hdfsDataSource", "dir");
hdfsTmpDir.deleteOnExit();
if (!hdfsTmpDir.delete()) {
throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
}
@@ -145,11 +145,12 @@ public class HdfsDataSegmentFinderTest
}
@AfterClass
public static void tearDownStatic()
public static void tearDownStatic() throws IOException
{
if (miniCluster != null) {
miniCluster.shutdown(true);
}
FileUtils.deleteDirectory(hdfsTmpDir);
}
@Before

View File

@ -24,6 +24,7 @@ import com.google.common.io.ByteStreams;
import io.druid.java.util.common.CompressionUtils;
import io.druid.java.util.common.StringUtils;
import io.druid.storage.hdfs.HdfsDataSegmentPuller;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -62,7 +63,6 @@ public class HdfsDataSegmentPullerTest
public static void setupStatic() throws IOException, ClassNotFoundException
{
hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
hdfsTmpDir.deleteOnExit();
if (!hdfsTmpDir.delete()) {
throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
}
@@ -74,7 +74,6 @@
final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
tmpFile.delete();
try {
tmpFile.deleteOnExit();
Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
Files.copy(tmpFile.toPath(), stream);
@@ -91,6 +90,7 @@
if (miniCluster != null) {
miniCluster.shutdown(true);
}
FileUtils.deleteDirectory(hdfsTmpDir);
}
@@ -112,18 +112,14 @@
public void testZip() throws IOException, SegmentLoadingException
{
final File tmpDir = com.google.common.io.Files.createTempDir();
tmpDir.deleteOnExit();
final File tmpFile = File.createTempFile("zipContents", ".txt", tmpDir);
tmpFile.deleteOnExit();
final Path zipPath = new Path("/tmp/testZip.zip");
final File outTmpDir = com.google.common.io.Files.createTempDir();
outTmpDir.deleteOnExit();
final URI uri = URI.create(uriBase.toString() + zipPath.toString());
tmpFile.deleteOnExit();
try (final OutputStream stream = new FileOutputStream(tmpFile)) {
ByteStreams.copy(new ByteArrayInputStream(pathByteContents), stream);
}
@@ -164,7 +160,6 @@
final Path zipPath = new Path("/tmp/testZip.gz");
final File outTmpDir = com.google.common.io.Files.createTempDir();
outTmpDir.deleteOnExit();
final File outFile = new File(outTmpDir, "testZip");
outFile.delete();
@@ -201,7 +196,6 @@
final Path zipPath = new Path(perTestPath, "test.txt");
final File outTmpDir = com.google.common.io.Files.createTempDir();
outTmpDir.deleteOnExit();
final File outFile = new File(outTmpDir, "test.txt");
outFile.delete();

View File

@@ -23,6 +23,7 @@ import com.google.common.io.ByteStreams;
import io.druid.java.util.common.StringUtils;
import io.druid.storage.hdfs.HdfsFileTimestampVersionFinder;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -58,7 +59,6 @@ public class HdfsFileTimestampVersionFinderTest
public static void setupStatic() throws IOException, ClassNotFoundException
{
hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
hdfsTmpDir.deleteOnExit();
if (!hdfsTmpDir.delete()) {
throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
}
@@ -70,7 +70,6 @@
final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
tmpFile.delete();
try {
tmpFile.deleteOnExit();
Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
Files.copy(tmpFile.toPath(), stream);
@@ -87,6 +86,7 @@
if (miniCluster != null) {
miniCluster.shutdown(true);
}
FileUtils.deleteDirectory(hdfsTmpDir);
}

View File

@@ -55,7 +55,7 @@ class HadoopIOPeon implements IOPeon
}
@Override
public void cleanup() throws IOException
public void close() throws IOException
{
throw new UnsupportedOperationException();
}

View File

@@ -33,6 +33,7 @@ import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.segment.indexing.DataSchema;
import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import org.apache.commons.io.FileUtils;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;
@@ -95,79 +96,96 @@ public class DetermineHashedPartitionsJobTest
);
}
public DetermineHashedPartitionsJobTest(String dataFilePath, long targetPartitionSize, String interval, int errorMargin, int expectedNumTimeBuckets, int[] expectedNumOfShards) throws IOException
public DetermineHashedPartitionsJobTest(
String dataFilePath,
long targetPartitionSize,
String interval,
int errorMargin,
int expectedNumTimeBuckets,
int[] expectedNumOfShards
) throws IOException
{
this.expectedNumOfShards = expectedNumOfShards;
this.expectedNumTimeBuckets = expectedNumTimeBuckets;
this.errorMargin = errorMargin;
File tmpDir = Files.createTempDir();
tmpDir.deleteOnExit();
HadoopIngestionSpec ingestionSpec = new HadoopIngestionSpec(
new DataSchema(
"test_schema",
HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
new StringInputRowParser(
new DelimitedParseSpec(
new TimestampSpec("ts", null, null),
new DimensionsSpec(
DimensionsSpec.getDefaultSchemas(ImmutableList.of("market", "quality", "placement", "placementish")),
null,
null
),
"\t",
null,
Arrays.asList(
"ts",
"market",
"quality",
"placement",
"placementish",
"index"
)
),
null
),
Map.class
),
new AggregatorFactory[]{new DoubleSumAggregatorFactory("index", "index")},
new UniformGranularitySpec(
Granularity.DAY,
QueryGranularities.NONE,
ImmutableList.of(new Interval(interval))
),
HadoopDruidIndexerConfig.JSON_MAPPER
),
new HadoopIOConfig(
ImmutableMap.<String, Object>of(
"paths",
dataFilePath,
"type",
"static"
), null, tmpDir.getAbsolutePath()
),
new HadoopTuningConfig(
tmpDir.getAbsolutePath(),
null,
new HashedPartitionsSpec(targetPartitionSize, null, true, null, null),
null,
null,
null,
false,
false,
false,
false,
null,
false,
false,
null,
null,
null,
false,
false
)
);
this.indexerConfig = new HadoopDruidIndexerConfig(ingestionSpec);
try {
HadoopIngestionSpec ingestionSpec = new HadoopIngestionSpec(
new DataSchema(
"test_schema",
HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
new StringInputRowParser(
new DelimitedParseSpec(
new TimestampSpec("ts", null, null),
new DimensionsSpec(
DimensionsSpec.getDefaultSchemas(ImmutableList.of(
"market",
"quality",
"placement",
"placementish"
)),
null,
null
),
"\t",
null,
Arrays.asList(
"ts",
"market",
"quality",
"placement",
"placementish",
"index"
)
),
null
),
Map.class
),
new AggregatorFactory[]{new DoubleSumAggregatorFactory("index", "index")},
new UniformGranularitySpec(
Granularity.DAY,
QueryGranularities.NONE,
ImmutableList.of(new Interval(interval))
),
HadoopDruidIndexerConfig.JSON_MAPPER
),
new HadoopIOConfig(
ImmutableMap.<String, Object>of(
"paths",
dataFilePath,
"type",
"static"
), null, tmpDir.getAbsolutePath()
),
new HadoopTuningConfig(
tmpDir.getAbsolutePath(),
null,
new HashedPartitionsSpec(targetPartitionSize, null, true, null, null),
null,
null,
null,
false,
false,
false,
false,
null,
false,
false,
null,
null,
null,
false,
false
)
);
this.indexerConfig = new HadoopDruidIndexerConfig(ingestionSpec);
}
finally {
FileUtils.deleteDirectory(tmpDir);
}
}
@Test

View File

@@ -69,8 +69,8 @@ public class HadoopIOPeonTest
Assert.assertNotNull(ioPeon.makeInputStream(tmpFolder.newFile(TMP_FILE_NAME).getName()));
}
@Test(expected = UnsupportedOperationException.class) public void testCleanup() throws IOException
@Test(expected = UnsupportedOperationException.class) public void testClose() throws IOException
{
ioPeon.cleanup();
ioPeon.close();
}
}

View File

@@ -26,6 +26,7 @@ import com.google.common.util.concurrent.MoreExecutors;
import io.druid.common.utils.UUIDUtils;
import io.druid.java.util.common.StringUtils;
import junit.framework.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -71,7 +72,6 @@ public class HdfsClasspathSetupTest
public static void setupStatic() throws IOException, ClassNotFoundException
{
hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
hdfsTmpDir.deleteOnExit();
if (!hdfsTmpDir.delete()) {
throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
}
@@ -100,6 +100,7 @@ public class HdfsClasspathSetupTest
if (miniCluster != null) {
miniCluster.shutdown(true);
}
FileUtils.deleteDirectory(hdfsTmpDir);
}
@After

View File

@@ -53,8 +53,6 @@ import java.util.zip.GZIPOutputStream;
public class CompressionUtilsTest
{
@Rule
public final TemporaryFolder temporaryFolder = new TemporaryFolder();
private static final String content;
private static final byte[] expected;
private static final byte[] gzBytes;
@@ -85,9 +83,19 @@
gzBytes = gzByteStream.toByteArray();
}
@Rule
public final TemporaryFolder temporaryFolder = new TemporaryFolder();
private File testDir;
private File testFile;
public static void assertGoodDataStream(InputStream stream) throws IOException
{
try (final ByteArrayOutputStream bos = new ByteArrayOutputStream(expected.length)) {
ByteStreams.copy(stream, bos);
Assert.assertArrayEquals(expected, bos.toByteArray());
}
}
@Before
public void setUp() throws IOException
{
@@ -99,14 +107,6 @@
Assert.assertTrue(testFile.getParentFile().equals(testDir));
}
public static void assertGoodDataStream(InputStream stream) throws IOException
{
try (final ByteArrayOutputStream bos = new ByteArrayOutputStream(expected.length)) {
ByteStreams.copy(stream, bos);
Assert.assertArrayEquals(expected, bos.toByteArray());
}
}
@Test
public void testGoodGzNameResolution()
{
@@ -131,15 +131,24 @@
{
final File tmpDir = temporaryFolder.newFolder("testGoodZipCompressUncompress");
final File zipFile = new File(tmpDir, "compressionUtilTest.zip");
zipFile.deleteOnExit();
CompressionUtils.zip(testDir, zipFile);
final File newDir = new File(tmpDir, "newDir");
newDir.mkdir();
CompressionUtils.unzip(zipFile, newDir);
final Path newPath = Paths.get(newDir.getAbsolutePath(), testFile.getName());
Assert.assertTrue(newPath.toFile().exists());
try (final FileInputStream inputStream = new FileInputStream(newPath.toFile())) {
assertGoodDataStream(inputStream);
try {
CompressionUtils.zip(testDir, zipFile);
final File newDir = new File(tmpDir, "newDir");
newDir.mkdir();
CompressionUtils.unzip(zipFile, newDir);
final Path newPath = Paths.get(newDir.getAbsolutePath(), testFile.getName());
Assert.assertTrue(newPath.toFile().exists());
try (final FileInputStream inputStream = new FileInputStream(newPath.toFile())) {
assertGoodDataStream(inputStream);
}
}
finally {
if (zipFile.exists()) {
zipFile.delete();
}
if (tmpDir.exists()) {
tmpDir.delete();
}
}
}
@@ -252,53 +261,6 @@
}
}
private static class ZeroRemainingInputStream extends FilterInputStream
{
private final AtomicInteger pos = new AtomicInteger(0);
protected ZeroRemainingInputStream(InputStream in)
{
super(in);
}
@Override
public synchronized void reset() throws IOException
{
super.reset();
pos.set(0);
}
@Override
public int read(byte b[]) throws IOException
{
final int len = Math.min(b.length, gzBytes.length - pos.get() % gzBytes.length);
pos.addAndGet(len);
return read(b, 0, len);
}
@Override
public int read() throws IOException
{
pos.incrementAndGet();
return super.read();
}
@Override
public int read(byte b[], int off, int len) throws IOException
{
final int l = Math.min(len, gzBytes.length - pos.get() % gzBytes.length);
pos.addAndGet(l);
return super.read(b, off, l);
}
@Override
public int available() throws IOException
{
return 0;
}
}
@Test
// Sanity check to make sure the test class works as expected
public void testZeroRemainingInputStream() throws IOException
@@ -410,7 +372,6 @@
}
}
@Test
// http://bugs.java.com/bugdatabase/view_bug.do?bug_id=7036144
public void testGunzipBugStreamWorkarround() throws IOException
@@ -539,7 +500,6 @@
Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}
@Test(expected = IOException.class)
public void testStreamErrorGzip() throws Exception
{
@@ -596,4 +556,50 @@
)
);
}
private static class ZeroRemainingInputStream extends FilterInputStream
{
private final AtomicInteger pos = new AtomicInteger(0);
protected ZeroRemainingInputStream(InputStream in)
{
super(in);
}
@Override
public synchronized void reset() throws IOException
{
super.reset();
pos.set(0);
}
@Override
public int read(byte b[]) throws IOException
{
final int len = Math.min(b.length, gzBytes.length - pos.get() % gzBytes.length);
pos.addAndGet(len);
return read(b, 0, len);
}
@Override
public int read() throws IOException
{
pos.incrementAndGet();
return super.read();
}
@Override
public int read(byte b[], int off, int len) throws IOException
{
final int l = Math.min(len, gzBytes.length - pos.get() % gzBytes.length);
pos.addAndGet(l);
return super.read(b, off, l);
}
@Override
public int available() throws IOException
{
return 0;
}
}
}

View File

@@ -642,7 +642,7 @@ public class IndexMerger
@Override
public void close() throws IOException
{
ioPeon.cleanup();
ioPeon.close();
}
});
try {

View File

@@ -135,7 +135,7 @@ public class IndexMergerV9 extends IndexMerger
@Override
public void close() throws IOException
{
ioPeon.cleanup();
ioPeon.close();
}
});
final FileSmoosher v9Smoosher = new FileSmoosher(outDir);

View File

@@ -210,7 +210,7 @@ public class StringDimensionMergerLegacy extends StringDimensionMergerV9 impleme
spatialWriter.close();
serializerUtils.writeString(spatialIndexFile, dimensionName);
ByteStreams.copy(spatialWriter.combineStreams(), spatialIndexFile);
spatialIoPeon.cleanup();
spatialIoPeon.close();
}
}

View File

@@ -19,15 +19,15 @@
package io.druid.segment.data;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
*/
public interface IOPeon
public interface IOPeon extends Closeable
{
public OutputStream makeOutputStream(String filename) throws IOException;
public InputStream makeInputStream(String filename) throws IOException;
public void cleanup() throws IOException;
}
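
Because IOPeon is now a Closeable, callers can scope a peon with try-with-resources or register it with a Closer, which is what the IndexMerger and IndexMergerV9 hunks above appear to do. A hedged usage sketch; the wrapper class and main() here are illustrative only, not part of this commit:

import io.druid.segment.data.TmpFileIOPeon;

import java.io.IOException;
import java.io.OutputStream;

public class IOPeonUsageExample
{
  public static void main(String[] args) throws IOException
  {
    // close() runs even if the write throws, deleting the peon's temp
    // files immediately instead of deferring to JVM shutdown.
    try (TmpFileIOPeon ioPeon = new TmpFileIOPeon()) {
      try (OutputStream out = ioPeon.makeOutputStream("values.bin")) {
        out.write(42);
      }
    }
  }
}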

View File

@@ -53,7 +53,6 @@ public class TmpFileIOPeon implements IOPeon
File retFile = createdFiles.get(filename);
if (retFile == null) {
retFile = File.createTempFile("filePeon", filename);
retFile.deleteOnExit();
createdFiles.put(filename, retFile);
return new BufferedOutputStream(new FileOutputStream(retFile));
} else if (allowOverwrite) {
@@ -72,7 +71,7 @@
}
@Override
public void cleanup() throws IOException
public void close() throws IOException
{
for (File file : createdFiles.values()) {
file.delete();

View File

@@ -28,6 +28,7 @@ import io.druid.segment.column.Column;
import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexAdapter;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import org.apache.commons.io.FileUtils;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;
@@ -46,32 +47,40 @@ public class EmptyIndexTest
if (!tmpDir.mkdir()) {
throw new IllegalStateException("tmp mkdir failed");
}
tmpDir.deleteOnExit();
IncrementalIndex emptyIndex = new OnheapIncrementalIndex(
0,
QueryGranularities.NONE,
new AggregatorFactory[0],
1000
);
IncrementalIndexAdapter emptyIndexAdapter = new IncrementalIndexAdapter(
new Interval("2012-08-01/P3D"),
emptyIndex,
new ConciseBitmapFactory()
);
TestHelper.getTestIndexMerger().merge(
Lists.<IndexableAdapter>newArrayList(emptyIndexAdapter),
true,
new AggregatorFactory[0],
tmpDir,
new IndexSpec()
);
try {
IncrementalIndex emptyIndex = new OnheapIncrementalIndex(
0,
QueryGranularities.NONE,
new AggregatorFactory[0],
1000
);
IncrementalIndexAdapter emptyIndexAdapter = new IncrementalIndexAdapter(
new Interval("2012-08-01/P3D"),
emptyIndex,
new ConciseBitmapFactory()
);
TestHelper.getTestIndexMerger().merge(
Lists.<IndexableAdapter>newArrayList(emptyIndexAdapter),
true,
new AggregatorFactory[0],
tmpDir,
new IndexSpec()
);
QueryableIndex emptyQueryableIndex = TestHelper.getTestIndexIO().loadIndex(tmpDir);
QueryableIndex emptyQueryableIndex = TestHelper.getTestIndexIO().loadIndex(tmpDir);
Assert.assertEquals("getDimensionNames", 0, Iterables.size(emptyQueryableIndex.getAvailableDimensions()));
Assert.assertEquals("getMetricNames", 0, Iterables.size(emptyQueryableIndex.getColumnNames()));
Assert.assertEquals("getDataInterval", new Interval("2012-08-01/P3D"), emptyQueryableIndex.getDataInterval());
Assert.assertEquals("getReadOnlyTimestamps", 0, emptyQueryableIndex.getColumn(Column.TIME_COLUMN_NAME).getLength());
Assert.assertEquals("getDimensionNames", 0, Iterables.size(emptyQueryableIndex.getAvailableDimensions()));
Assert.assertEquals("getMetricNames", 0, Iterables.size(emptyQueryableIndex.getColumnNames()));
Assert.assertEquals("getDataInterval", new Interval("2012-08-01/P3D"), emptyQueryableIndex.getDataInterval());
Assert.assertEquals(
"getReadOnlyTimestamps",
0,
emptyQueryableIndex.getColumn(Column.TIME_COLUMN_NAME).getLength()
);
}
finally {
FileUtils.deleteDirectory(tmpDir);
}
}
}

View File

@@ -46,6 +46,7 @@ import io.druid.query.timeseries.TimeseriesResultValue;
import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import org.apache.commons.io.FileUtils;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.Test;
@@ -256,10 +257,14 @@ public class IndexMergerV9WithSpatialIndexTest
File tmpFile = File.createTempFile("billy", "yay");
tmpFile.delete();
tmpFile.mkdirs();
tmpFile.deleteOnExit();
INDEX_MERGER_V9.persist(theIndex, tmpFile, indexSpec);
return INDEX_IO.loadIndex(tmpFile);
try {
INDEX_MERGER_V9.persist(theIndex, tmpFile, indexSpec);
return INDEX_IO.loadIndex(tmpFile);
}
finally {
FileUtils.deleteDirectory(tmpFile);
}
}
private static QueryableIndex makeMergedQueryableIndex(IndexSpec indexSpec)
@@ -470,33 +475,38 @@
File mergedFile = new File(tmpFile, "merged");
firstFile.mkdirs();
firstFile.deleteOnExit();
secondFile.mkdirs();
secondFile.deleteOnExit();
thirdFile.mkdirs();
thirdFile.deleteOnExit();
mergedFile.mkdirs();
mergedFile.deleteOnExit();
INDEX_MERGER_V9.persist(first, DATA_INTERVAL, firstFile, indexSpec);
INDEX_MERGER_V9.persist(second, DATA_INTERVAL, secondFile, indexSpec);
INDEX_MERGER_V9.persist(third, DATA_INTERVAL, thirdFile, indexSpec);
QueryableIndex mergedRealtime = INDEX_IO.loadIndex(
INDEX_MERGER_V9.mergeQueryableIndex(
Arrays.asList(
INDEX_IO.loadIndex(firstFile),
INDEX_IO.loadIndex(secondFile),
INDEX_IO.loadIndex(thirdFile)
),
true,
METRIC_AGGS,
mergedFile,
indexSpec
)
);
try {
QueryableIndex mergedRealtime = INDEX_IO.loadIndex(
INDEX_MERGER_V9.mergeQueryableIndex(
Arrays.asList(
INDEX_IO.loadIndex(firstFile),
INDEX_IO.loadIndex(secondFile),
INDEX_IO.loadIndex(thirdFile)
),
true,
METRIC_AGGS,
mergedFile,
indexSpec
)
);
return mergedRealtime;
}
finally {
FileUtils.deleteDirectory(firstFile);
FileUtils.deleteDirectory(secondFile);
FileUtils.deleteDirectory(thirdFile);
FileUtils.deleteDirectory(mergedFile);
}
return mergedRealtime;
}
catch (IOException e) {
throw Throwables.propagate(e);

View File

@@ -92,7 +92,7 @@ public class CompressedIntsIndexedWriterTest
@After
public void tearDown() throws Exception
{
ioPeon.cleanup();
ioPeon.close();
}
private void generateVals(final int totalSize, final int maxValue) throws IOException

View File

@@ -182,7 +182,7 @@ public class CompressedVSizeIndexedV3WriterTest
@After
public void tearDown() throws Exception
{
ioPeon.cleanup();
ioPeon.close();
}
@Test

View File

@@ -91,7 +91,7 @@ public class CompressedVSizeIntsIndexedWriterTest
@After
public void tearDown() throws Exception
{
ioPeon.cleanup();
ioPeon.close();
}
private void generateVals(final int totalSize, final int maxValue) throws IOException

View File

@@ -61,7 +61,7 @@ class IOPeonForTesting implements IOPeon
}
@Override
public void cleanup() throws IOException
public void close() throws IOException
{
outStreams.clear();
}

View File

@@ -50,7 +50,7 @@ public class VSizeIndexedIntsWriterTest
@After
public void tearDown() throws Exception
{
ioPeon.cleanup();
ioPeon.close();
}
private void generateVals(final int totalSize, final int maxValue) throws IOException

View File

@@ -20,9 +20,9 @@
package io.druid.segment.loading;
import com.google.common.io.Files;
import io.druid.java.util.common.CompressionUtils;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
@@ -49,10 +49,15 @@ public class LocalDataSegmentPullerTest
public void setup() throws IOException
{
tmpDir = temporaryFolder.newFolder();
tmpDir.deleteOnExit();
puller = new LocalDataSegmentPuller();
}
@After
public void after() throws IOException
{
FileUtils.deleteDirectory(tmpDir);
}
@Test
public void simpleZipTest() throws IOException, SegmentLoadingException
{

View File

@@ -96,6 +96,7 @@ public class RealtimePlumberSchoolTest
private DataSchema schema;
private DataSchema schema2;
private FireDepartmentMetrics metrics;
private File tmpDir;
public RealtimePlumberSchoolTest(RejectionPolicyFactory rejectionPolicy, boolean buildV9Directly)
{
@@ -124,8 +125,7 @@ public class RealtimePlumberSchoolTest
@Before
public void setUp() throws Exception
{
final File tmpDir = Files.createTempDir();
tmpDir.deleteOnExit();
tmpDir = Files.createTempDir();
ObjectMapper jsonMapper = new DefaultObjectMapper();
@@ -237,6 +237,7 @@ public class RealtimePlumberSchoolTest
schema.getDataSource()
)
);
FileUtils.deleteDirectory(tmpDir);
}
@Test(timeout = 60000)