mirror of https://github.com/apache/druid.git
Add FileUtils.createTempDir() and enforce its usage. (#8932)
* Add FileUtils.createTempDir() and enforce its usage. The purpose of this is to improve error messages. Previously, the error message on a nonexistent or unwritable temp directory would be "Failed to create directory within 10,000 attempts".
* Further updates.
* Another update.
* Remove commons-io from benchmark.
* Fix tests.
commit e0eb85ace7
parent 0514e5686e
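The motivation is easiest to see in a small sketch (hypothetical caller code, not part of this commit). With java.io.tmpdir pointing at a missing or read-only path, Guava's helper dies with the generic "attempts" message quoted above, while the new helper reports the actual cause:

    import java.io.File;

    public class TempDirDemo
    {
      public static void main(String[] args)
      {
        // Before (Guava): on a nonexistent or unwritable java.io.tmpdir this fails with
        //   IllegalStateException: Failed to create directory within 10000 attempts ...
        // File tmpDir = com.google.common.io.Files.createTempDir();

        // After (this commit): the failure names the real problem, e.g.
        //   IllegalStateException: java.io.tmpdir (/nonexistent) does not exist
        //   IllegalStateException: java.io.tmpdir (/some/dir) is not writable, check permissions
        File tmpDir = org.apache.druid.java.util.common.FileUtils.createTempDir();
        System.out.println("Created " + tmpDir.getAbsolutePath());
      }
    }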
@@ -122,10 +122,6 @@
         <groupId>org.apache.commons</groupId>
         <artifactId>commons-math3</artifactId>
     </dependency>
-    <dependency>
-        <groupId>commons-io</groupId>
-        <artifactId>commons-io</artifactId>
-    </dependency>
     <dependency>
         <groupId>com.fasterxml.jackson.core</groupId>
         <artifactId>jackson-annotations</artifactId>

@@ -22,14 +22,13 @@ package org.apache.druid.benchmark;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.guava.Sequence;
 import org.apache.druid.java.util.common.guava.Sequences;
@@ -173,7 +172,7 @@ public class FilterPartitionBenchmark
       incIndex.add(row);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     indexFile = INDEX_MERGER_V9.persist(

@@ -21,8 +21,6 @@ package org.apache.druid.benchmark;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -30,6 +28,7 @@ import org.apache.druid.benchmark.query.QueryBenchmarkUtil;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.guava.Sequence;
 import org.apache.druid.java.util.common.logger.Logger;
@@ -186,7 +185,7 @@ public class FilteredAggregatorBenchmark
       inputRows.add(row);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     indexFile = INDEX_MERGER_V9.persist(

@@ -19,9 +19,9 @@
 
 package org.apache.druid.benchmark;
 
-import com.google.common.io.Files;
 import com.google.common.primitives.Ints;
 import org.apache.druid.common.config.NullHandling;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.io.smoosh.FileSmoosher;
 import org.apache.druid.java.util.common.io.smoosh.SmooshedFileMapper;
 import org.apache.druid.segment.data.GenericIndexed;
@@ -126,7 +126,7 @@ public class GenericIndexedBenchmark
       element.putInt(0, i);
       genericIndexedWriter.write(element.array());
     }
-    smooshDir = Files.createTempDir();
+    smooshDir = FileUtils.createTempDir();
     file = File.createTempFile("genericIndexedBenchmark", "meta");
 
     try (FileChannel fileChannel =

@@ -23,8 +23,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.dataformat.smile.SmileFactory;
 import com.google.common.base.Supplier;
 import com.google.common.base.Suppliers;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -36,6 +34,7 @@ import org.apache.druid.collections.StupidPool;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.granularity.Granularity;
 import org.apache.druid.java.util.common.guava.Sequence;
@@ -291,7 +290,7 @@ public class GroupByTypeInterfaceBenchmark
         rowsPerSegment
     );
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: %s", tmpDir.getAbsolutePath());
 
     // queryableIndexes -> numSegments worth of on-disk segments

@@ -20,7 +20,6 @@
 package org.apache.druid.benchmark;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -29,6 +28,7 @@ import org.apache.druid.collections.StupidPool;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.guava.Sequence;
 import org.apache.druid.java.util.common.logger.Logger;
@@ -277,7 +277,7 @@ public class TopNTypeInterfaceBenchmark
       incIndexes.add(incIndex);
     }
 
-    File tmpFile = Files.createTempDir();
+    File tmpFile = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpFile.getAbsolutePath());
     tmpFile.deleteOnExit();
 

@@ -21,8 +21,6 @@ package org.apache.druid.benchmark.datagen;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.hash.Hashing;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.data.input.impl.DimensionSchema;
@@ -31,6 +29,7 @@ import org.apache.druid.data.input.impl.DoubleDimensionSchema;
 import org.apache.druid.data.input.impl.FloatDimensionSchema;
 import org.apache.druid.data.input.impl.LongDimensionSchema;
 import org.apache.druid.data.input.impl.StringDimensionSchema;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.granularity.Granularity;
@@ -95,7 +94,7 @@ public class SegmentGenerator implements Closeable
       log.warn("No cache directory provided; benchmark data caching is disabled. "
                + "Set the 'druid.benchmark.cacheDir' property or 'DRUID_BENCHMARK_CACHE_DIR' environment variable "
                + "to use caching.");
-      this.cacheDir = Files.createTempDir();
+      this.cacheDir = FileUtils.createTempDir();
       this.cleanupCacheDir = true;
     }
   }

@@ -21,14 +21,13 @@ package org.apache.druid.benchmark.indexing;
 
 import com.fasterxml.jackson.databind.InjectableValues;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.druid.math.expr.ExprMacroTable;
 import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde;
@@ -141,7 +140,7 @@ public class IndexMergeBenchmark
       incIndex.add(row);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     File indexFile = INDEX_MERGER_V9.persist(

@@ -20,14 +20,13 @@
 package org.apache.druid.benchmark.indexing;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde;
 import org.apache.druid.segment.IndexIO;
@@ -169,7 +168,7 @@ public class IndexPersistBenchmark
   @OutputTimeUnit(TimeUnit.MICROSECONDS)
   public void persistV9(Blackhole blackhole) throws Exception
   {
-    File tmpDir = Files.createTempDir();
+    File tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
     try {
       File indexFile = INDEX_MERGER_V9.persist(

@@ -25,8 +25,6 @@ import com.google.common.base.Supplier;
 import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -37,6 +35,7 @@ import org.apache.druid.collections.StupidPool;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.granularity.Granularity;
@@ -419,7 +418,7 @@ public class GroupByBenchmark
         rowsPerSegment
     );
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: %s", tmpDir.getAbsolutePath());
 
     // queryableIndexes -> numSegments worth of on-disk segments

@@ -22,8 +22,6 @@ package org.apache.druid.benchmark.query;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -31,6 +29,7 @@ import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.data.input.Row;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.Intervals;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.guava.Sequence;
@@ -285,7 +284,7 @@ public class ScanBenchmark
       incIndexes.add(incIndex);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();

@@ -24,8 +24,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -33,6 +31,7 @@ import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.data.input.Row;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.guava.Sequence;
@@ -355,7 +354,7 @@ public class SearchBenchmark
       incIndexes.add(incIndex);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();

@@ -20,14 +20,13 @@
 package org.apache.druid.benchmark.query;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.Intervals;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.granularity.Granularities;
@@ -280,7 +279,7 @@ public class TimeseriesBenchmark
       incIndexes.add(incIndex);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();

@@ -20,8 +20,6 @@
 package org.apache.druid.benchmark.query;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -29,6 +27,7 @@ import org.apache.druid.collections.StupidPool;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.guava.Sequence;
@@ -257,7 +256,7 @@ public class TopNBenchmark
       incIndexes.add(incIndex);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();

@@ -21,8 +21,6 @@ package org.apache.druid.benchmark.query.timecompare;
 
 import com.fasterxml.jackson.databind.InjectableValues;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemaInfo;
 import org.apache.druid.benchmark.datagen.BenchmarkSchemas;
@@ -31,6 +29,7 @@ import org.apache.druid.collections.StupidPool;
 import org.apache.druid.common.config.NullHandling;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.Intervals;
 import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.granularity.Granularities;
@@ -331,7 +330,7 @@ public class TimeCompareBenchmark
       incIndexes.add(incIndex);
     }
 
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     log.info("Using temp dir: " + tmpDir.getAbsolutePath());
 
     qIndexes = new ArrayList<>();

@@ -20,6 +20,7 @@ com.google.common.collect.Sets#newHashSet() @ Create java.util.HashSet directly
 com.google.common.collect.Sets#newLinkedHashSet() @ Create java.util.LinkedHashSet directly
 com.google.common.collect.Sets#newTreeSet() @ Create java.util.TreeSet directly
 com.google.common.collect.Sets#newTreeSet(java.util.Comparator) @ Create java.util.TreeSet directly
+com.google.common.io.Files#createTempDir() @ Use org.apache.druid.java.util.common.FileUtils.createTempDir()
 com.google.common.util.concurrent.MoreExecutors#sameThreadExecutor() @ Use org.apache.druid.java.util.common.concurrent.Execs#directExecutor()
 com.google.common.util.concurrent.MoreExecutors#newDirectExecutorService() @ Use org.apache.druid.java.util.common.concurrent.Execs#directExecutor()
 com.google.common.util.concurrent.MoreExecutors#directExecutor() @ Use org.apache.druid.java.util.common.concurrent.Execs#directExecutor()
@@ -29,11 +30,13 @@ java.lang.String#matches(java.lang.String) @ Use startsWith(), endsWith(), conta
 java.lang.String#replace(java.lang.CharSequence,java.lang.CharSequence) @ Use one of the appropriate methods in StringUtils instead
 java.lang.String#replaceAll(java.lang.String,java.lang.String) @ Use one of the appropriate methods in StringUtils instead, or compile and cache a Pattern explicitly
 java.lang.String#replaceFirst(java.lang.String,java.lang.String) @ Use String.indexOf() and substring methods, or compile and cache a Pattern explicitly
+java.nio.file.Files#createTempDirectory(java.lang.String prefix,java.nio.file.FileAttribute...) @ Use org.apache.druid.java.util.common.FileUtils.createTempDir()
 java.util.LinkedList @ Use ArrayList or ArrayDeque instead
 java.util.Random#<init>() @ Use ThreadLocalRandom.current() or the constructor with a seed (the latter in tests only!)
 java.lang.Math#random() @ Use ThreadLocalRandom.current()
 java.util.regex.Pattern#matches(java.lang.String,java.lang.CharSequence) @ Use String.startsWith(), endsWith(), contains(), or compile and cache a Pattern explicitly
 org.apache.commons.io.FileUtils#getTempDirectory() @ Use org.junit.rules.TemporaryFolder for tests instead
+org.apache.commons.io.FileUtils#deleteDirectory() @ Use org.apache.druid.java.util.common.FileUtils#deleteDirectory()
 java.lang.Class#getCanonicalName() @ Class.getCanonicalName can return null for anonymous types, use Class.getName instead.
 com.google.common.base.Objects#firstNonNull(java.lang.Object, java.lang.Object) @ Use org.apache.druid.common.guava.GuavaUtils#firstNonNull(java.lang.Object, java.lang.Object) instead (probably... the GuavaUtils method return object is nullable)
 
@@ -48,4 +51,4 @@ com.google.common.io.BaseEncoding.base64
 
 @defaultMessage Use com.google.errorprone.annotations.concurrent.GuardedBy
 javax.annotations.concurrent.GuardedBy
-com.amazonaws.annotation.GuardedBy
+com.amazonaws.annotation.GuardedBy

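For context, the hunks above edit a forbidden-apis signature list (one "signature @ message" entry per line); once these entries land, any module checked by the forbidden-apis tool that still calls a banned factory method fails the build with the listed message. A minimal before/after sketch (hypothetical code, assuming the standard forbidden-apis checker Druid's build already runs):

    import java.io.File;
    import org.apache.druid.java.util.common.FileUtils;

    public class TempDirCompliance
    {
      static File scratchDir()
      {
        // Now rejected at build time by the signature added above:
        // return com.google.common.io.Files.createTempDir();

        // The replacement that the signature's message points to:
        return FileUtils.createTempDir();
      }
    }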
@@ -22,11 +22,12 @@ package org.apache.druid.java.util.common;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.io.ByteSource;
-import com.google.common.io.Files;
+import io.netty.util.SuppressForbidden;
 import org.apache.commons.io.IOUtils;
 import org.apache.druid.data.input.impl.prefetch.ObjectOpenFunction;
 import org.apache.druid.java.util.common.logger.Logger;
 
+import javax.annotation.Nullable;
 import java.io.Closeable;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -38,6 +39,11 @@ import java.io.OutputStream;
 import java.nio.MappedByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.FileChannel;
+import java.nio.file.AccessDeniedException;
+import java.nio.file.FileSystemException;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
 import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
@@ -81,7 +87,7 @@ public class FileUtils
     try {
       StreamUtils.retryCopy(
           byteSource,
-          Files.asByteSink(outFile),
+          com.google.common.io.Files.asByteSink(outFile),
           shouldRetry,
           maxAttempts
       );
@@ -156,8 +162,8 @@ public class FileUtils
    *
    * <p>This only works for files <= {@link Integer#MAX_VALUE} bytes.
    *
-   * <p>Similar to {@link Files#map(File)}, but returns {@link MappedByteBufferHandler}, that makes it easier to unmap
-   * the buffer within try-with-resources pattern:
+   * <p>Similar to {@link com.google.common.io.Files#map(File)}, but returns {@link MappedByteBufferHandler}, that
+   * makes it easier to unmap the buffer within try-with-resources pattern:
    * <pre>{@code
    * try (MappedByteBufferHandler fileMappingHandler = FileUtils.map(file)) {
    *   ByteBuffer fileMapping = fileMappingHandler.get();
@@ -174,7 +180,7 @@ public class FileUtils
    */
   public static MappedByteBufferHandler map(File file) throws IOException
   {
-    MappedByteBuffer mappedByteBuffer = Files.map(file);
+    MappedByteBuffer mappedByteBuffer = com.google.common.io.Files.map(file);
     return new MappedByteBufferHandler(mappedByteBuffer);
   }
 
@@ -204,7 +210,7 @@ public class FileUtils
     final File tmpFile = new File(tmpDir, StringUtils.format(".%s.%s", file.getName(), UUID.randomUUID()));
 
     //noinspection unused
-    try (final Closeable deleter = () -> java.nio.file.Files.deleteIfExists(tmpFile.toPath())) {
+    try (final Closeable deleter = () -> Files.deleteIfExists(tmpFile.toPath())) {
       final T retVal;
 
       try (
@@ -226,7 +232,7 @@ public class FileUtils
     }
 
     // No exception thrown; do the move.
-    java.nio.file.Files.move(
+    Files.move(
         tmpFile.toPath(),
         file.toPath(),
         StandardCopyOption.ATOMIC_MOVE,
@@ -267,13 +273,13 @@ public class FileUtils
    * This method is supposed to be used for copying large files.
    * The output file is deleted automatically if copy fails.
    *
-   * @param object object to open
-   * @param objectOpenFunction function to open the given object
-   * @param outFile file to write data
-   * @param fetchBuffer a buffer to copy data from the input stream to the file
-   * @param retryCondition condition which should be satisfied for retry
-   * @param numTries max number of retries
-   * @param messageOnRetry log message on retry
+   * @param object             object to open
+   * @param objectOpenFunction function to open the given object
+   * @param outFile            file to write data
+   * @param fetchBuffer        a buffer to copy data from the input stream to the file
+   * @param retryCondition     condition which should be satisfied for retry
+   * @param numTries           max number of retries
+   * @param messageOnRetry     log message on retry
    *
    * @return the number of bytes copied
    */
@@ -333,6 +339,66 @@ public class FileUtils
     }
   }
 
+  /**
+   * Creates a temporary directory inside the configured temporary space (java.io.tmpdir). Similar to the method
+   * {@link com.google.common.io.Files#createTempDir()} from Guava, but has nicer error messages.
+   *
+   * @throws IllegalStateException if the directory could not be created
+   */
+  public static File createTempDir()
+  {
+    return createTempDir(null);
+  }
+
+  /**
+   * Creates a temporary directory inside the configured temporary space (java.io.tmpdir). Similar to the method
+   * {@link com.google.common.io.Files#createTempDir()} from Guava, but has nicer error messages.
+   *
+   * @param prefix base directory name; if null/empty then this method will use "druid"
+   *
+   * @throws IllegalStateException if the directory could not be created
+   */
+  @SuppressForbidden(reason = "Files#createTempDirectory")
+  public static File createTempDir(@Nullable final String prefix)
+  {
+    final String parentDirectory = System.getProperty("java.io.tmpdir");
+
+    if (parentDirectory == null) {
+      // Not expected.
+      throw new ISE("System property java.io.tmpdir is not set, cannot create temporary directories");
+    }
+
+    try {
+      final Path tmpPath = Files.createTempDirectory(
+          new File(parentDirectory).toPath(),
+          prefix == null || prefix.isEmpty() ? "druid" : prefix
+      );
+      return tmpPath.toFile();
+    }
+    catch (IOException e) {
+      // Some inspection to improve error messages.
+      if (e instanceof NoSuchFileException && !new File(parentDirectory).exists()) {
+        throw new ISE("java.io.tmpdir (%s) does not exist", parentDirectory);
+      } else if ((e instanceof FileSystemException && e.getMessage().contains("Read-only file system"))
+                 || (e instanceof AccessDeniedException)) {
+        throw new ISE("java.io.tmpdir (%s) is not writable, check permissions", parentDirectory);
+      } else {
+        // Well, maybe it was something else.
+        throw new ISE(e, "Failed to create temporary directory in java.io.tmpdir (%s)", parentDirectory);
+      }
+    }
+  }
+
+  /**
+   * Equivalent to {@link org.apache.commons.io.FileUtils#deleteDirectory(File)}. Exists here mostly so callers
+   * can avoid dealing with our FileUtils and the Commons FileUtils having the same name.
+   */
+  @SuppressForbidden(reason = "FilesUtils#deleteDirectory")
+  public static void deleteDirectory(final File directory) throws IOException
+  {
+    org.apache.commons.io.FileUtils.deleteDirectory(directory);
+  }
+
   public interface OutputStreamConsumer<T>
   {
     T apply(OutputStream outputStream) throws IOException;

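A hedged usage sketch of the two helpers introduced above (the try/finally pairing and all names here are our illustration, not code from this commit): createTempDir(prefix) yields a directory under java.io.tmpdir named from the prefix plus a random suffix, and deleteDirectory() is the matching recursive cleanup.

    import java.io.File;
    import java.io.IOException;
    import org.apache.druid.java.util.common.FileUtils;

    public class ScratchSpaceExample
    {
      public static void main(String[] args) throws IOException
      {
        // Something like ${java.io.tmpdir}/example<random>; the prefix defaults to "druid" when null/empty.
        final File workDir = FileUtils.createTempDir("example");
        try {
          // ... write scratch files under workDir ...
        }
        finally {
          // Companion helper from the same hunk; delegates to commons-io's recursive deleteDirectory.
          FileUtils.deleteDirectory(workDir);
        }
      }
    }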
@@ -19,6 +19,7 @@
 
 package org.apache.druid.java.util.metrics;
 
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.StreamUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.hyperic.jni.ArchLoaderException;
@@ -29,7 +30,6 @@ import org.hyperic.sigar.SigarLoader;
 import java.io.File;
 import java.io.IOException;
 import java.net.URL;
-import java.nio.file.Files;
 
 public class SigarUtil
 {
@@ -43,7 +43,7 @@ public class SigarUtil
 
     final URL url = SysMonitor.class.getResource("/" + libName);
     if (url != null) {
-      final File tmpDir = Files.createTempDirectory("sigar").toFile();
+      final File tmpDir = FileUtils.createTempDir("sigar");
       // As per java.io.DeleteOnExitHook.runHooks() deletion order is reversed from registration order
       tmpDir.deleteOnExit();
       final File nativeLibTmpFile = new File(tmpDir, libName);

@@ -22,6 +22,7 @@ package org.apache.druid.java.util.common;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;
 
 import java.io.File;
@@ -34,6 +35,9 @@ public class FileUtilsTest
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+
   @Test
   public void testMap() throws IOException
   {
@@ -80,4 +84,59 @@ public class FileUtilsTest
     });
     Assert.assertEquals("baz", StringUtils.fromUtf8(Files.readAllBytes(tmpFile.toPath())));
   }
+
+  @Test
+  public void testCreateTempDir() throws IOException
+  {
+    final File tempDir = FileUtils.createTempDir();
+    try {
+      Assert.assertEquals(
+          new File(System.getProperty("java.io.tmpdir")).toPath(),
+          tempDir.getParentFile().toPath()
+      );
+    }
+    finally {
+      Files.delete(tempDir.toPath());
+    }
+  }
+
+  @Test
+  public void testCreateTempDirNonexistentBase()
+  {
+    expectedException.expect(IllegalStateException.class);
+    expectedException.expectMessage("java.io.tmpdir (/nonexistent) does not exist");
+
+    final String oldJavaTmpDir = System.getProperty("java.io.tmpdir");
+    try {
+      System.setProperty("java.io.tmpdir", "/nonexistent");
+      FileUtils.createTempDir();
+    }
+    finally {
+      System.setProperty("java.io.tmpdir", oldJavaTmpDir);
+    }
+  }
+
+  @Test
+  public void testCreateTempDirUnwritableBase() throws IOException
+  {
+    final File baseDir = FileUtils.createTempDir();
+    try {
+      expectedException.expect(IllegalStateException.class);
+      expectedException.expectMessage("java.io.tmpdir (" + baseDir + ") is not writable");
+
+      final String oldJavaTmpDir = System.getProperty("java.io.tmpdir");
+      try {
+        System.setProperty("java.io.tmpdir", baseDir.getPath());
+        baseDir.setWritable(false);
+        FileUtils.createTempDir();
+      }
+      finally {
+        System.setProperty("java.io.tmpdir", oldJavaTmpDir);
+      }
+    }
+    finally {
+      baseDir.setWritable(true);
+      Files.delete(baseDir.toPath());
+    }
+  }
 }

@@ -21,7 +21,7 @@ package org.apache.druid.storage.azure;
 
 import com.google.common.io.ByteSource;
 import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.druid.segment.loading.SegmentLoadingException;
 import org.apache.druid.utils.CompressionUtils;
@@ -50,7 +50,7 @@ public class AzureDataSegmentPuller
     this.azureStorage = azureStorage;
   }
 
-  org.apache.druid.java.util.common.FileUtils.FileCopyResult getSegmentFiles(
+  FileUtils.FileCopyResult getSegmentFiles(
       final String containerName,
       final String blobPath,
       final File outDir
@@ -58,7 +58,7 @@ public class AzureDataSegmentPuller
       throws SegmentLoadingException
   {
     try {
-      FileUtils.forceMkdir(outDir);
+      org.apache.commons.io.FileUtils.forceMkdir(outDir);
 
       log.info(
           "Loading container: [%s], with blobPath: [%s] and outDir: [%s]", containerName, blobPath, outDir
@@ -75,7 +75,7 @@ public class AzureDataSegmentPuller
       }
 
       final ByteSource byteSource = new AzureByteSource(azureStorage, containerName, actualBlobPath);
-      final org.apache.druid.java.util.common.FileUtils.FileCopyResult result = CompressionUtils.unzip(
+      final FileUtils.FileCopyResult result = CompressionUtils.unzip(
          byteSource,
          outDir,
          AzureUtils.AZURE_RETRY,

@@ -33,7 +33,6 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URISyntaxException;
-import java.nio.file.Files;
 
 public class AzureDataSegmentPullerTest extends EasyMockSupport
 {
@@ -54,7 +53,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
   {
     final String value = "bucket";
     final File pulledFile = AzureTestUtils.createZipTempFile(SEGMENT_FILE_NAME, value);
-    final File toDir = Files.createTempDirectory("druid").toFile();
+    final File toDir = FileUtils.createTempDir();
     try {
       final InputStream zipStream = new FileInputStream(pulledFile);
 
@@ -75,7 +74,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
     }
     finally {
       pulledFile.delete();
-      org.apache.commons.io.FileUtils.deleteDirectory(toDir);
+      FileUtils.deleteDirectory(toDir);
     }
   }
 
@@ -84,7 +83,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
       throws IOException, URISyntaxException, StorageException, SegmentLoadingException
   {
 
-    final File outDir = Files.createTempDirectory("druid").toFile();
+    final File outDir = FileUtils.createTempDir();
     try {
       EasyMock.expect(azureStorage.getBlobInputStream(CONTAINER_NAME, BLOB_PATH)).andThrow(
           new URISyntaxException(
@@ -105,7 +104,7 @@ public class AzureDataSegmentPullerTest extends EasyMockSupport
       verifyAll();
     }
     finally {
-      org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+      FileUtils.deleteDirectory(outDir);
     }
   }
 }

@@ -21,9 +21,8 @@ package org.apache.druid.storage.azure;
 
 import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.StringUtils;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
@@ -58,7 +57,7 @@ public class AzureTaskLogsTest extends EasyMockSupport
   @Test
   public void testPushTaskLog() throws Exception
   {
-    final File tmpDir = Files.createTempDir();
+    final File tmpDir = FileUtils.createTempDir();
 
     try {
       final File logFile = new File(tmpDir, "log");

@@ -91,7 +91,7 @@ public class CassandraDataSegmentPuller extends CassandraStorage
     }
     catch (Exception e) {
       try {
-        org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+        FileUtils.deleteDirectory(outDir);
       }
       catch (IOException e1) {
         log.error(e1, "Error clearing segment directory [%s]", outDir.getAbsolutePath());

@@ -60,7 +60,7 @@ public class CloudFilesDataSegmentPuller
     }
     catch (Exception e) {
       try {
-        org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+        FileUtils.deleteDirectory(outDir);
       }
       catch (IOException ioe) {
         log.warn(

@@ -21,7 +21,6 @@ package org.apache.druid.data.input;
 
 import com.fasterxml.jackson.databind.Module;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.io.Files;
 import org.apache.avro.file.DataFileReader;
 import org.apache.avro.file.DataFileWriter;
 import org.apache.avro.file.FileReader;
@@ -29,6 +28,7 @@ import org.apache.avro.generic.GenericDatumReader;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.druid.data.input.avro.AvroExtensionsModule;
+import org.apache.druid.java.util.common.FileUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -81,7 +81,7 @@ public class AvroHadoopInputRowParserTest
   private static GenericRecord buildAvroFromFile(GenericRecord datum)
       throws IOException
   {
-    final File tmpDir = Files.createTempDir();
+    final File tmpDir = FileUtils.createTempDir();
 
     // 0. write avro object into temp file.
     File someAvroDatumFile = new File(tmpDir, "someAvroDatum.avro");

@@ -64,7 +64,7 @@ public class GoogleDataSegmentPuller implements URIDataPuller
     }
     catch (Exception e) {
       try {
-        org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+        FileUtils.deleteDirectory(outDir);
       }
       catch (IOException ioe) {
         LOG.warn(

@@ -22,7 +22,7 @@ package org.apache.druid.storage.google;
 import com.google.api.client.googleapis.json.GoogleJsonResponseException;
 import com.google.api.client.googleapis.testing.json.GoogleJsonResponseExceptionFactoryTesting;
 import com.google.api.client.json.jackson2.JacksonFactory;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.segment.loading.SegmentLoadingException;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
@@ -31,7 +31,6 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.file.Files;
 
 public class GoogleDataSegmentPullerTest extends EasyMockSupport
 {
@@ -42,7 +41,7 @@ public class GoogleDataSegmentPullerTest extends EasyMockSupport
   public void testDeleteOutputDirectoryWhenErrorIsRaisedPullingSegmentFiles()
       throws IOException, SegmentLoadingException
   {
-    final File outDir = Files.createTempDirectory("druid").toFile();
+    final File outDir = FileUtils.createTempDir();
     try {
       GoogleStorage storage = createMock(GoogleStorage.class);
       final GoogleJsonResponseException exception = GoogleJsonResponseExceptionFactoryTesting.newMock(

@@ -22,9 +22,8 @@ package org.apache.druid.storage.google;
 import com.google.api.client.http.InputStreamContent;
 import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.StringUtils;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
@@ -37,6 +36,7 @@ import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 
 public class GoogleTaskLogsTest extends EasyMockSupport
 {
@@ -58,11 +58,11 @@ public class GoogleTaskLogsTest extends EasyMockSupport
   @Test
   public void testPushTaskLog() throws Exception
   {
-    final File tmpDir = Files.createTempDir();
+    final File tmpDir = FileUtils.createTempDir();
 
     try {
       final File logFile = new File(tmpDir, "log");
-      BufferedWriter output = java.nio.file.Files.newBufferedWriter(logFile.toPath(), StandardCharsets.UTF_8);
+      BufferedWriter output = Files.newBufferedWriter(logFile.toPath(), StandardCharsets.UTF_8);
       output.write("test");
       output.close();
 

@@ -20,7 +20,7 @@
 package org.apache.druid.segment.loading;
 
 import com.google.common.io.ByteStreams;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.storage.hdfs.HdfsFileTimestampVersionFinder;

@@ -20,7 +20,7 @@
 package org.apache.druid.storage.hdfs;
 
 import com.google.common.io.ByteStreams;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.segment.loading.SegmentLoadingException;
@@ -111,12 +111,12 @@ public class HdfsDataSegmentPullerTest
   @Test
   public void testZip() throws IOException, SegmentLoadingException
   {
-    final File tmpDir = com.google.common.io.Files.createTempDir();
+    final File tmpDir = FileUtils.createTempDir();
     final File tmpFile = File.createTempFile("zipContents", ".txt", tmpDir);
 
     final Path zipPath = new Path("/tmp/testZip.zip");
 
-    final File outTmpDir = com.google.common.io.Files.createTempDir();
+    final File outTmpDir = FileUtils.createTempDir();
 
     final URI uri = URI.create(uriBase.toString() + zipPath);
 
@@ -159,7 +159,7 @@ public class HdfsDataSegmentPullerTest
   {
     final Path zipPath = new Path("/tmp/testZip.gz");
 
-    final File outTmpDir = com.google.common.io.Files.createTempDir();
+    final File outTmpDir = FileUtils.createTempDir();
     final File outFile = new File(outTmpDir, "testZip");
     outFile.delete();
 
@@ -193,7 +193,7 @@ public class HdfsDataSegmentPullerTest
 
     final Path zipPath = new Path(perTestPath, "test.txt");
 
-    final File outTmpDir = com.google.common.io.Files.createTempDir();
+    final File outTmpDir = FileUtils.createTempDir();
     final File outFile = new File(outTmpDir, "test.txt");
     outFile.delete();
 

@@ -20,11 +20,10 @@
 package org.apache.druid.indexing.kafka.test;
 
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
 import kafka.server.KafkaConfig;
 import kafka.server.KafkaServer;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.indexing.kafka.KafkaConsumerConfigs;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
@@ -62,7 +61,7 @@ public class TestBroker implements Closeable
   )
   {
     this.zookeeperConnect = zookeeperConnect;
-    this.directory = directory == null ? Files.createTempDir() : directory;
+    this.directory = directory == null ? FileUtils.createTempDir() : directory;
     this.directoryCleanup = directory == null;
     this.id = id;
     this.brokerProps = brokerProps == null ? ImmutableMap.of() : brokerProps;
@@ -136,7 +135,7 @@ public class TestBroker implements Closeable
       server.awaitShutdown();
     }
     if (directoryCleanup) {
-      FileUtils.forceDelete(directory);
+      FileUtils.deleteDirectory(directory);
     }
   }
 }

@@ -125,7 +125,7 @@ public class S3DataSegmentPuller implements URIDataPuller
     }
     catch (Exception e) {
       try {
-        org.apache.commons.io.FileUtils.deleteDirectory(outDir);
+        FileUtils.deleteDirectory(outDir);
       }
       catch (IOException ioe) {
         log.warn(

@@ -30,13 +30,13 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.common.guava.ThreadRenamingRunnable;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.data.input.Row;
 import org.apache.druid.data.input.Rows;
 import org.apache.druid.indexer.hadoop.SegmentInputRow;
 import org.apache.druid.indexer.path.DatasourcePathSpec;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IAE;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.StringUtils;

@@ -23,12 +23,11 @@ import com.google.common.base.Function;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.io.Closeables;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.data.input.InputRow;
 import org.apache.druid.data.input.Row;
 import org.apache.druid.indexer.HadoopDruidIndexerConfig;
 import org.apache.druid.indexer.JobHelper;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.druid.segment.QueryableIndex;
 import org.apache.druid.segment.QueryableIndexStorageAdapter;
@@ -86,7 +85,7 @@ public class DatasourceRecordReader extends RecordReader<NullWritable, InputRow>
 
       logger.info("Fetch segment files from [%s]", path);
 
-      File dir = Files.createTempDir();
+      File dir = FileUtils.createTempDir();
       tmpSegmentDirs.add(dir);
       logger.info("Locally storing fetched segment at [%s]", dir);
 

@@ -21,13 +21,13 @@ package org.apache.druid.indexer;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
 import org.apache.druid.data.input.impl.DelimitedParseSpec;
 import org.apache.druid.data.input.impl.DimensionsSpec;
 import org.apache.druid.data.input.impl.StringInputRowParser;
 import org.apache.druid.data.input.impl.TimestampSpec;
 import org.apache.druid.indexer.partitions.HashedPartitionsSpec;
 import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.Intervals;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.granularity.Granularity;
@@ -133,7 +133,7 @@ public class DetermineHashedPartitionsJobTest
     this.expectedNumOfShards = expectedNumOfShards;
     this.expectedNumTimeBuckets = expectedNumTimeBuckets;
     this.errorMargin = errorMargin;
-    File tmpDir = Files.createTempDir();
+    File tmpDir = FileUtils.createTempDir();
 
     ImmutableList<Interval> intervals = null;
     if (interval != null) {

@@ -21,13 +21,12 @@ package org.apache.druid.indexer;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.data.input.impl.CSVParseSpec;
 import org.apache.druid.data.input.impl.DimensionsSpec;
 import org.apache.druid.data.input.impl.StringInputRowParser;
 import org.apache.druid.data.input.impl.TimestampSpec;
 import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.Intervals;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.query.aggregation.AggregatorFactory;
@@ -274,10 +273,10 @@ public class DeterminePartitionsJobTest
 
     dataFile = File.createTempFile("test_website_data", "tmp");
     dataFile.deleteOnExit();
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
     tmpDir.deleteOnExit();
 
-    FileUtils.writeLines(dataFile, data);
+    org.apache.commons.io.FileUtils.writeLines(dataFile, data);
 
     config = new HadoopDruidIndexerConfig(
         new HadoopIngestionSpec(
@@ -385,7 +384,7 @@ public class DeterminePartitionsJobTest
   @After
   public void tearDown() throws Exception
   {
-    FileUtils.forceDelete(dataFile);
+    org.apache.commons.io.FileUtils.forceDelete(dataFile);
     FileUtils.deleteDirectory(tmpDir);
   }
 }

@@ -23,10 +23,9 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
-import junit.framework.Assert;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.druid.common.utils.UUIDUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -37,6 +36,7 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;

@@ -22,8 +22,7 @@ package org.apache.druid.indexer.hadoop;
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,7 +44,7 @@ public class FSSpideringIteratorTest
   {
     String[] testFiles = {"file1", "file2", "file3", "file4", "file5"};
 
-    File baseDir = Files.createTempDir();
+    File baseDir = FileUtils.createTempDir();
 
     try {
       new File(baseDir, "dir1").mkdir();

@@ -23,8 +23,8 @@ import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.Preconditions;
-import com.google.common.io.Files;
 import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.segment.IndexSpec;
 import org.apache.druid.segment.indexing.TuningConfig;
 import org.apache.druid.segment.realtime.appenderator.AppenderatorConfig;
@@ -50,7 +50,7 @@ public class RealtimeAppenderatorTuningConfig implements TuningConfig, Appendera
 
   private static File createNewBasePersistDirectory()
   {
-    return Files.createTempDir();
+    return FileUtils.createTempDir("druid-realtime-persist");
   }
 
   private final int maxRowsInMemory;

@@ -23,15 +23,14 @@ import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
-import com.google.common.io.Files;
 import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.indexing.common.config.FileTaskLogsConfig;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.druid.tasklogs.TaskLogs;
 
 import java.io.File;
 import java.io.FileFilter;
 import java.io.IOException;
 import java.io.InputStream;
@@ -135,20 +134,11 @@ public class FileTaskLogs implements TaskLogs
       throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
     }
 
-    File[] files = taskLogDir.listFiles(
-        new FileFilter()
-        {
-          @Override
-          public boolean accept(File f)
-          {
-            return f.lastModified() < timestamp;
-          }
-        }
-    );
+    File[] files = taskLogDir.listFiles(f -> f.lastModified() < timestamp);
 
     for (File file : files) {
       log.info("Deleting local task log [%s].", file.getAbsolutePath());
-      FileUtils.forceDelete(file);
+      org.apache.commons.io.FileUtils.forceDelete(file);
 
       if (Thread.currentThread().isInterrupted()) {
         throw new IOException(

@@ -37,7 +37,6 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.guice.annotations.Self;
 import org.apache.druid.indexer.RunnerTaskState;
 import org.apache.druid.indexer.TaskLocation;
@@ -49,6 +48,7 @@ import org.apache.druid.indexing.overlord.autoscaling.ScalingStats;
 import org.apache.druid.indexing.overlord.config.ForkingTaskRunnerConfig;
 import org.apache.druid.indexing.worker.config.WorkerConfig;
 import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.StringUtils;

@@ -29,7 +29,6 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
 import org.apache.druid.guice.annotations.Self;
 import org.apache.druid.indexer.RunnerTaskState;
 import org.apache.druid.indexer.TaskLocation;
@@ -43,6 +42,7 @@ import org.apache.druid.indexing.common.task.Task;
 import org.apache.druid.indexing.overlord.autoscaling.ScalingStats;
 import org.apache.druid.indexing.worker.config.WorkerConfig;
 import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IOE;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.StringUtils;

@@ -20,8 +20,6 @@
package org.apache.druid.indexing.overlord.sampler;

import com.google.common.base.Preconditions;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.druid.data.input.InputFormat;
import org.apache.druid.data.input.InputRow;

@@ -35,6 +33,7 @@ import org.apache.druid.data.input.impl.TimedShutoffInputSourceReader;
import org.apache.druid.data.input.impl.TimestampSpec;
import org.apache.druid.indexing.overlord.sampler.SamplerResponse.SamplerResponseRow;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.io.Closer;
import org.apache.druid.java.util.common.parsers.CloseableIterator;

@@ -101,7 +100,7 @@ public class InputSourceSampler
        : samplerConfig;

    final Closer closer = Closer.create();
-    final File tempDir = Files.createTempDir();
+    final File tempDir = FileUtils.createTempDir();
    closer.register(() -> FileUtils.deleteDirectory(tempDir));

    final InputSourceReader reader = buildReader(

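The sampler hunk shows the cleanup idiom this refactor pairs with: create the temp directory up front and immediately register its deletion with the Closer, so the directory is removed even if the sampling work throws. A minimal standalone sketch of the same pattern, using Guava's Closer and JDK temp directories as stand-ins for the Druid classes not shown here:

import com.google.common.io.Closer;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class TempDirLifecycle
{
  public static void main(String[] args) throws IOException
  {
    final Closer closer = Closer.create();
    final File tempDir = Files.createTempDirectory("sampler").toFile();
    // Registered before any work happens, so closer.close() deletes the
    // directory whether the work below succeeds or fails.
    closer.register(() -> deleteRecursively(tempDir));
    try {
      // ... do work that writes into tempDir ...
    }
    finally {
      closer.close();
    }
  }

  // Stand-in for FileUtils.deleteDirectory, whose source is not in this diff.
  private static void deleteRecursively(File dir) throws IOException
  {
    final File[] children = dir.listFiles();
    if (children != null) {
      for (File child : children) {
        deleteRecursively(child);
      }
    }
    Files.deleteIfExists(dir.toPath());
  }
}
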
@@ -22,8 +22,8 @@ package org.apache.druid.indexing.common.tasklogs;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.indexing.common.config.FileTaskLogsConfig;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.tasklogs.TaskLogs;
import org.junit.Assert;

@@ -26,7 +26,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
import com.google.inject.Binder;
import com.google.inject.Module;
import org.apache.druid.client.coordinator.CoordinatorClient;

@@ -53,6 +52,7 @@ import org.apache.druid.indexing.overlord.HeapMemoryTaskStorage;
import org.apache.druid.indexing.overlord.Segments;
import org.apache.druid.indexing.overlord.TaskLockbox;
import org.apache.druid.indexing.overlord.TaskStorage;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IOE;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.JodaUtils;

@@ -344,7 +344,7 @@ public class IngestSegmentFirehoseFactoryTest
  private static final String TIME_COLUMN = "ts";
  private static final Integer MAX_SHARD_NUMBER = 10;
  private static final Integer MAX_ROWS = 10;
-  private static final File TMP_DIR = Files.createTempDir();
+  private static final File TMP_DIR = FileUtils.createTempDir();
  private static final File PERSIST_DIR = Paths.get(TMP_DIR.getAbsolutePath(), "indexTestMerger").toFile();
  private static final List<DataSegment> SEGMENT_SET = new ArrayList<>(MAX_SHARD_NUMBER);

@@ -24,8 +24,6 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.client.coordinator.CoordinatorClient;
import org.apache.druid.data.input.FiniteFirehoseFactory;
import org.apache.druid.data.input.Firehose;

@@ -42,6 +40,7 @@ import org.apache.druid.indexing.common.RetryPolicyFactory;
import org.apache.druid.indexing.common.SegmentLoaderFactory;
import org.apache.druid.indexing.common.TestUtils;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;

@@ -204,7 +203,7 @@ public class IngestSegmentFirehoseFactoryTimelineTest
      DataSegmentMaker... segmentMakers
  )
  {
-    final File tmpDir = Files.createTempDir();
+    final File tmpDir = FileUtils.createTempDir();
    final Set<DataSegment> segments = new HashSet<>();
    for (DataSegmentMaker segmentMaker : segmentMakers) {
      segments.add(segmentMaker.make(tmpDir));

@@ -22,7 +22,6 @@ package org.apache.druid.indexing.worker;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
import org.apache.druid.discovery.DruidLeaderClient;
import org.apache.druid.indexer.TaskLocation;
import org.apache.druid.indexer.TaskStatus;

@@ -38,6 +37,7 @@ import org.apache.druid.indexing.common.task.NoopTestTaskReportFileWriter;
import org.apache.druid.indexing.common.task.Task;
import org.apache.druid.indexing.common.task.Tasks;
import org.apache.druid.indexing.overlord.TestTaskRunner;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.segment.IndexMergerV9;
import org.apache.druid.segment.loading.SegmentLoaderConfig;

@@ -78,7 +78,7 @@ public class WorkerTaskManagerTest
  private WorkerTaskManager createWorkerTaskManager()
  {
    TaskConfig taskConfig = new TaskConfig(
-        Files.createTempDir().toString(),
+        FileUtils.createTempDir().toString(),
        null,
        null,
        0,

@@ -22,7 +22,6 @@ package org.apache.druid.indexing.worker;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.base.Joiner;
-import com.google.common.io.Files;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

@@ -44,6 +43,7 @@ import org.apache.druid.indexing.common.task.Task;
import org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner;
import org.apache.druid.indexing.overlord.TestRemoteTaskRunnerConfig;
import org.apache.druid.indexing.worker.config.WorkerConfig;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.segment.IndexMergerV9;

@@ -148,7 +148,7 @@ public class WorkerTaskMonitorTest
  private WorkerTaskMonitor createTaskMonitor()
  {
    final TaskConfig taskConfig = new TaskConfig(
-        Files.createTempDir().toString(),
+        FileUtils.createTempDir().toString(),
        null,
        null,
        0,

@@ -27,10 +27,10 @@ import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.common.primitives.Ints;
import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.io.ZeroCopyByteArrayOutputStream;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.JodaUtils;

@@ -138,7 +138,7 @@ public class IndexMergerV9 implements IndexMerger
    Closer closer = Closer.create();
    try {
      final FileSmoosher v9Smoosher = new FileSmoosher(outDir);
-      FileUtils.forceMkdir(outDir);
+      org.apache.commons.io.FileUtils.forceMkdir(outDir);

      SegmentWriteOutMediumFactory omf = segmentWriteOutMediumFactory != null ? segmentWriteOutMediumFactory
                                                                              : defaultSegmentWriteOutMediumFactory;

@@ -774,7 +774,7 @@ public class IndexMergerV9 implements IndexMerger
      );
    }

-    FileUtils.forceMkdir(outDir);
+    org.apache.commons.io.FileUtils.forceMkdir(outDir);

    log.debug("Starting persist for interval[%s], rows[%,d]", dataInterval, index.size());
    return merge(

@@ -864,7 +864,7 @@ public class IndexMergerV9 implements IndexMerger
  ) throws IOException
  {
    FileUtils.deleteDirectory(outDir);
-    FileUtils.forceMkdir(outDir);
+    org.apache.commons.io.FileUtils.forceMkdir(outDir);

    final List<String> mergedDimensions = IndexMerger.getMergedDimensions(indexes);

@@ -967,7 +967,7 @@ public class IndexMergerV9 implements IndexMerger
  ) throws IOException
  {
    FileUtils.deleteDirectory(outDir);
-    FileUtils.forceMkdir(outDir);
+    org.apache.commons.io.FileUtils.forceMkdir(outDir);

    final List<String> mergedDimensions = IndexMerger.getMergedDimensions(indexes);

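A detail worth calling out in the IndexMergerV9 hunks: org.apache.druid.java.util.common.FileUtils and org.apache.commons.io.FileUtils share a simple name, and a Java source file can single-type-import only one of them. Once the Druid class takes the import, remaining commons-io calls (here forceMkdir, which the hunks suggest the Druid utility did not yet provide) must be written fully qualified, while calls the Druid class does provide, such as deleteDirectory, stay unqualified. A small sketch of the resulting resolution rules:

import org.apache.druid.java.util.common.FileUtils;

import java.io.File;
import java.io.IOException;

class ImportClashSketch
{
  File prepare(File outDir) throws IOException
  {
    // The single-type import above makes the unqualified name resolve to Druid's utility...
    File tmpDir = FileUtils.createTempDir();

    // ...so the commons-io class of the same simple name has to be spelled out in full.
    org.apache.commons.io.FileUtils.forceMkdir(outDir);
    return tmpDir;
  }
}
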
@@ -19,7 +19,7 @@

package org.apache.druid.segment.writeout;

-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.io.Closer;

import java.io.File;

@@ -35,7 +35,7 @@ public final class TmpFileSegmentWriteOutMedium implements SegmentWriteOutMedium
  TmpFileSegmentWriteOutMedium(File outDir) throws IOException
  {
    File tmpOutputFilesDir = new File(outDir, "tmpOutputFiles");
-    FileUtils.forceMkdir(tmpOutputFilesDir);
+    org.apache.commons.io.FileUtils.forceMkdir(tmpOutputFilesDir);
    closer.register(() -> FileUtils.deleteDirectory(tmpOutputFilesDir));
    this.dir = tmpOutputFilesDir;
  }

@@ -22,8 +22,6 @@ package org.apache.druid.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.data.input.impl.CSVParseSpec;

@@ -32,6 +30,7 @@ import org.apache.druid.data.input.impl.JSONParseSpec;
import org.apache.druid.data.input.impl.StringInputRowParser;
import org.apache.druid.data.input.impl.TimestampSpec;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.aggregation.AggregationTestHelper;

@@ -169,7 +168,7 @@ public class MultiValuedDimensionTest
    }


-    persistedSegmentDir = Files.createTempDir();
+    persistedSegmentDir = FileUtils.createTempDir();
    TestHelper.getTestIndexMergerV9(segmentWriteOutMediumFactory)
              .persist(incrementalIndex, persistedSegmentDir, new IndexSpec(), null);
    queryableIndex = TestHelper.getTestIndexIO().loadIndex(persistedSegmentDir);

@@ -202,7 +201,7 @@ public class MultiValuedDimensionTest
    for (String row : rowsNullSampler) {
      incrementalIndexNullSampler.add(parserNullSampler.parse(row));
    }
-    persistedSegmentDirNullSampler = Files.createTempDir();
+    persistedSegmentDirNullSampler = FileUtils.createTempDir();
    TestHelper.getTestIndexMergerV9(segmentWriteOutMediumFactory)
              .persist(incrementalIndexNullSampler, persistedSegmentDirNullSampler, new IndexSpec(), null);

@@ -26,9 +26,7 @@ import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.CloseableDefaultBlockingPool;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.data.input.InputRow;

@@ -37,6 +35,7 @@ import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;

@@ -158,7 +157,7 @@ public class GroupByLimitPushDownInsufficientBufferTest
  @Before
  public void setup() throws Exception
  {
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();

    InputRow row;
    List<String> dimNames = Arrays.asList("dimA", "metA");

@@ -26,9 +26,7 @@ import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.CloseableDefaultBlockingPool;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.data.input.InputRow;

@@ -37,6 +35,7 @@ import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;

@@ -167,7 +166,7 @@ public class GroupByLimitPushDownMultiNodeMergeTest
  @Before
  public void setup() throws Exception
  {
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();

    InputRow row;
    List<String> dimNames = Arrays.asList("dimA", "metA");

@@ -24,9 +24,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
-import com.google.common.io.Files;
import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.CloseableDefaultBlockingPool;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.data.input.InputRow;

@@ -35,6 +33,7 @@ import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;

@@ -153,7 +152,7 @@ public class GroupByMultiSegmentTest
  @Before
  public void setup() throws Exception
  {
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();

    InputRow row;
    List<String> dimNames = Arrays.asList("dimA", "metA");

@@ -26,9 +26,7 @@ import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Files;
import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.BlockingPool;
import org.apache.druid.collections.DefaultBlockingPool;
import org.apache.druid.collections.NonBlockingPool;

@@ -39,6 +37,7 @@ import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;

@@ -154,7 +153,7 @@ public class NestedQueryPushDownTest
  public void setup() throws Exception

  {
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();

    InputRow row;
    List<String> dimNames = Arrays.asList("dimA", "metA", "dimB", "metB");

@@ -21,8 +21,8 @@ package org.apache.druid.segment;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.bitmap.ConciseBitmapFactory;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.segment.column.ColumnHolder;

@@ -23,10 +23,10 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteSource;
-import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.data.input.InputRow;
import org.apache.druid.data.input.MapBasedInputRow;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;

@@ -147,9 +147,9 @@ public class IndexMergerV9CompatibilityTest
    for (InputRow event : events) {
      toPersist.add(event);
    }
-    tmpDir = Files.createTempDir();
+    tmpDir = FileUtils.createTempDir();
    persistTmpDir = new File(tmpDir, "persistDir");
-    FileUtils.forceMkdir(persistTmpDir);
+    org.apache.commons.io.FileUtils.forceMkdir(persistTmpDir);
    String[] files = new String[] {"00000.smoosh", "meta.smoosh", "version.bin"};
    for (String file : files) {
      new ByteSource()

@@ -172,10 +172,10 @@ public class IndexMergerV9CompatibilityTest
  @Test
  public void testPersistWithSegmentMetadata() throws IOException
  {
-    File outDir = Files.createTempDir();
+    File outDir = FileUtils.createTempDir();
    QueryableIndex index = null;
    try {
-      outDir = Files.createTempDir();
+      outDir = FileUtils.createTempDir();
      index = indexIO.loadIndex(indexMerger.persist(toPersist, outDir, INDEX_SPEC, null));

      Assert.assertEquals("value", index.getMetadata().get("key"));

@@ -21,13 +21,13 @@ package org.apache.druid.segment;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.collections.spatial.search.RadiusBound;
import org.apache.druid.collections.spatial.search.RectangularBound;
import org.apache.druid.data.input.MapBasedInputRow;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.SpatialDimensionSchema;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.Druids;

@@ -24,6 +24,7 @@ import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import org.apache.commons.io.IOUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.guava.CloseQuietly;
import org.apache.druid.java.util.common.io.smoosh.FileSmoosher;

@@ -45,7 +46,6 @@ import org.junit.runners.Parameterized;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
-import java.nio.file.Files;
import java.util.List;
import java.util.Random;
import java.util.Set;

@@ -183,11 +183,7 @@ public class CompressedColumnarIntsSerializerTest

  private void checkV2SerializedSizeAndData(int chunkFactor) throws Exception
  {
-    File tmpDirectory = Files.createTempDirectory(StringUtils.format(
-        "CompressedIntsIndexedWriterTest_%d",
-        chunkFactor
-    )).toFile();
-
+    File tmpDirectory = FileUtils.createTempDir(StringUtils.format("CompressedIntsIndexedWriterTest_%d", chunkFactor));
    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);

    CompressedColumnarIntsSerializer writer = new CompressedColumnarIntsSerializer(

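The serializer-test hunks replace the JDK pattern Files.createTempDirectory(prefix).toFile() with the one-liner FileUtils.createTempDir(prefix). Functionally the two are close; the visible differences are the File (rather than Path) return type, the unchecked failure mode, and, per the test changes later in this commit, a clearer error message. A side-by-side sketch, assuming the utility's signature matches the call sites above:

import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class TempDirStyles
{
  File before(int chunkFactor) throws IOException
  {
    // Old style: JDK Path API plus a .toFile() hop, checked IOException on failure.
    return Files.createTempDirectory(
        StringUtils.format("CompressedIntsIndexedWriterTest_%d", chunkFactor)
    ).toFile();
  }

  File after(int chunkFactor)
  {
    // New style: one call returning File, failing with a message that names
    // java.io.tmpdir when the temp location is unusable.
    return FileUtils.createTempDir(StringUtils.format("CompressedIntsIndexedWriterTest_%d", chunkFactor));
  }
}
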
@@ -23,6 +23,7 @@ import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.apache.commons.io.IOUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.guava.CloseQuietly;
import org.apache.druid.java.util.common.io.smoosh.FileSmoosher;

@@ -43,7 +44,6 @@ import org.junit.runners.Parameterized;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
-import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.NoSuchElementException;

@@ -217,11 +217,11 @@ public class V3CompressedVSizeColumnarMultiIntsSerializerTest

  private void checkV2SerializedSizeAndData(int offsetChunkFactor, int valueChunkFactor) throws Exception
  {
-    File tmpDirectory = Files.createTempDirectory(StringUtils.format(
+    File tmpDirectory = FileUtils.createTempDir(StringUtils.format(
        "CompressedVSizeIndexedV3WriterTest_%d_%d",
        offsetChunkFactor,
        offsetChunkFactor
-    )).toFile();
+    ));
    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);
    int maxValue = vals.size() > 0 ? getMaxValue(vals) : 0;

@@ -19,9 +19,9 @@

package org.apache.druid.segment.writeout;

-import com.google.common.io.Files;
import com.google.common.primitives.Ints;
import org.apache.commons.io.IOUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.junit.Assert;
import org.junit.Test;

@@ -42,7 +42,7 @@ public class WriteOutBytesTest
  public static Collection<Object[]> constructorFeeder() throws IOException
  {
    return Arrays.asList(
-        new Object[] {new TmpFileSegmentWriteOutMedium(Files.createTempDir())},
+        new Object[] {new TmpFileSegmentWriteOutMedium(FileUtils.createTempDir())},
        new Object[] {new OffHeapMemorySegmentWriteOutMedium()},
        new Object[] {new OnHeapMemorySegmentWriteOutMedium()}
    );

@@ -22,9 +22,8 @@ package org.apache.druid.segment.indexing;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
-import com.google.common.io.Files;
import org.apache.druid.indexer.partitions.PartitionsSpec;
-import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.segment.IndexSpec;
import org.apache.druid.segment.realtime.appenderator.AppenderatorConfig;
import org.apache.druid.segment.realtime.plumber.IntervalStartVersioningPolicy;

@@ -40,6 +39,7 @@ import javax.annotation.Nullable;
import java.io.File;

/**
 *
 */
public class RealtimeTuningConfig implements TuningConfig, AppenderatorConfig
{

@@ -58,15 +58,7 @@ public class RealtimeTuningConfig implements TuningConfig, AppenderatorConfig

  private static File createNewBasePersistDirectory()
  {
-    try {
-      return Files.createTempDir();
-    }
-    catch (IllegalStateException e) {
-      String messageTemplate = "Failed to create temporary directory in [%s]! " +
-                               "Make sure the `java.io.tmpdir` property is set to an existing and writable directory " +
-                               "with enough free space.";
-      throw new ISE(e, messageTemplate, System.getProperty("java.io.tmpdir"));
-    }
+    return FileUtils.createTempDir("druid-realtime-persist");
  }

  // Might make sense for this to be a builder

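The RealtimeTuningConfig hunk is the clearest statement of the commit's direction: nine lines of per-call-site error translation collapse into one call because the diagnosis moves into the shared utility. A condensed before/after sketch of that pattern; the class and method names here are illustrative, while the two bodies are lifted from the hunk above:

import com.google.common.io.Files;
import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.ISE;

import java.io.File;

class BasePersistDirStyles
{
  // Before: each caller wrapped Guava's opaque IllegalStateException to attach context.
  static File beforeStyle()
  {
    try {
      return Files.createTempDir();
    }
    catch (IllegalStateException e) {
      throw new ISE(e, "Failed to create temporary directory in [%s]!", System.getProperty("java.io.tmpdir"));
    }
  }

  // After: the shared utility owns the diagnosis, so the call site is one line.
  static File afterStyle()
  {
    return FileUtils.createTempDir("druid-realtime-persist");
  }
}
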
@@ -20,7 +20,7 @@
package org.apache.druid.segment.loading;

import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.MapUtils;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.timeline.DataSegment;

@@ -21,7 +21,7 @@ package org.apache.druid.segment.loading;

import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IOE;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.segment.SegmentUtils;

@@ -83,7 +83,7 @@ public class LocalDataSegmentPusher implements DataSegmentPusher

    final File tmpOutDir = new File(baseStorageDir, makeIntermediateDir());
    log.debug("Creating intermediate directory[%s] for segment[%s].", tmpOutDir.toString(), segment.getId());
-    FileUtils.forceMkdir(tmpOutDir);
+    org.apache.commons.io.FileUtils.forceMkdir(tmpOutDir);

    try {
      final File tmpIndexFile = new File(tmpOutDir, INDEX_FILENAME);

@@ -93,7 +93,7 @@ public class LocalDataSegmentPusher implements DataSegmentPusher
        .withSize(size)
        .withBinaryVersion(SegmentUtils.getVersionFromDir(dataSegmentFile));

-    FileUtils.forceMkdir(outDir);
+    org.apache.commons.io.FileUtils.forceMkdir(outDir);
    final File indexFileTarget = new File(outDir, tmpIndexFile.getName());

    if (!tmpIndexFile.renameTo(indexFileTarget)) {

@@ -22,8 +22,8 @@ package org.apache.druid.segment.loading;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Inject;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.guice.annotations.Json;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.segment.IndexIO;

@@ -37,11 +37,11 @@ import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.client.cache.Cache;
import org.apache.druid.data.input.Committer;
import org.apache.druid.data.input.InputRow;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;

@@ -1293,7 +1293,7 @@ public class AppenderatorImpl implements Appenderator
  private File createPersistDirIfNeeded(SegmentIdWithShardSpec identifier) throws IOException
  {
    final File persistDir = computePersistDir(identifier);
-    FileUtils.forceMkdir(persistDir);
+    org.apache.commons.io.FileUtils.forceMkdir(persistDir);

    objectMapper.writeValue(computeIdentifierFile(identifier), identifier);

@@ -26,7 +26,6 @@ import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableMap;
import com.google.common.primitives.Ints;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.client.cache.Cache;
import org.apache.druid.client.cache.CacheConfig;
import org.apache.druid.client.cache.CachePopulatorStats;

@@ -36,6 +35,7 @@ import org.apache.druid.concurrent.TaskThreadPriority;
import org.apache.druid.data.input.Committer;
import org.apache.druid.data.input.InputRow;
import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.Pair;

@@ -665,7 +665,7 @@ public class RealtimePlumber implements Plumber
      try {
        File corruptSegmentDir = computeCorruptedFileDumpDir(segmentDir, schema);
        log.info("Renaming %s to %s", segmentDir.getAbsolutePath(), corruptSegmentDir.getAbsolutePath());
-        FileUtils.copyDirectory(segmentDir, corruptSegmentDir);
+        org.apache.commons.io.FileUtils.copyDirectory(segmentDir, corruptSegmentDir);
        FileUtils.deleteDirectory(segmentDir);
      }
      catch (Exception e1) {

@@ -24,6 +24,7 @@ import org.apache.druid.segment.IndexSpec;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.data.CompressionStrategy;
import org.apache.druid.timeline.partition.NumberedShardSpec;
+import org.hamcrest.CoreMatchers;
import org.joda.time.Period;
import org.junit.Assert;
import org.junit.Test;

@@ -46,13 +47,16 @@ public class RealtimeTuningConfigTest
  {
    String propertyName = "java.io.tmpdir";
    String originalValue = System.getProperty(propertyName);
+    String nonExistedDirectory = "/tmp/" + UUID.randomUUID();
    try {
-      String nonExistedDirectory = "/tmp/" + UUID.randomUUID();
      System.setProperty(propertyName, nonExistedDirectory);
      RealtimeTuningConfig.makeDefaultTuningConfig(null);
    }
    catch (IllegalStateException e) {
-      Assert.assertTrue(e.getMessage().startsWith("Failed to create temporary directory in"));
+      Assert.assertThat(
+          e.getMessage(),
+          CoreMatchers.startsWith("java.io.tmpdir (" + nonExistedDirectory + ") does not exist")
+      );
    }
    finally {
      System.setProperty(propertyName, originalValue);

@@ -141,7 +145,10 @@ public class RealtimeTuningConfigTest
    Assert.assertEquals(new Period("PT1H"), config.getWindowPeriod());
    Assert.assertEquals(true, config.isReportParseExceptions());
    Assert.assertEquals(new IndexSpec(null, null, CompressionStrategy.NONE, null), config.getIndexSpec());
-    Assert.assertEquals(new IndexSpec(null, CompressionStrategy.UNCOMPRESSED, null, null), config.getIndexSpecForIntermediatePersists());
+    Assert.assertEquals(
+        new IndexSpec(null, CompressionStrategy.UNCOMPRESSED, null, null),
+        config.getIndexSpecForIntermediatePersists()
+    );

  }
}

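The updated test pins the new failure mode: point java.io.tmpdir at a directory that does not exist and expect a descriptive IllegalStateException straight from the utility, rather than a wrapped message. A trimmed, standalone version of that test shape, using only JUnit 4 and Hamcrest; newTempDir() is a hypothetical stand-in for the code under test, mimicking the behavior inferred from this diff:

import org.hamcrest.CoreMatchers;
import org.junit.Assert;
import org.junit.Test;

import java.io.File;
import java.util.UUID;

public class TmpDirFailureTest
{
  @Test
  public void failsWithDescriptiveMessageWhenTmpDirIsMissing()
  {
    final String propertyName = "java.io.tmpdir";
    final String originalValue = System.getProperty(propertyName);
    final String nonExistentDirectory = "/tmp/" + UUID.randomUUID();
    try {
      System.setProperty(propertyName, nonExistentDirectory);
      newTempDir();
      Assert.fail("expected IllegalStateException");
    }
    catch (IllegalStateException e) {
      Assert.assertThat(
          e.getMessage(),
          CoreMatchers.startsWith("java.io.tmpdir (" + nonExistentDirectory + ") does not exist")
      );
    }
    finally {
      // Always restore the property so other tests see a sane JVM.
      System.setProperty(propertyName, originalValue);
    }
  }

  // Hypothetical stand-in for the utility under test.
  private File newTempDir()
  {
    final String tmp = System.getProperty("java.io.tmpdir");
    if (!new File(tmp).exists()) {
      throw new IllegalStateException("java.io.tmpdir (" + tmp + ") does not exist");
    }
    return new File(tmp);
  }
}
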
@@ -20,7 +20,7 @@
package org.apache.druid.segment.loading;

import com.google.common.io.Files;
-import org.apache.commons.io.FileUtils;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.utils.CompressionUtils;
import org.junit.After;
import org.junit.Assert;

@@ -21,7 +21,6 @@ package org.apache.druid.segment.realtime.appenderator;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.client.cache.CacheConfig;
import org.apache.druid.client.cache.CachePopulatorStats;
import org.apache.druid.client.cache.MapCache;

@@ -30,6 +29,7 @@ import org.apache.druid.data.input.impl.JSONParseSpec;
import org.apache.druid.data.input.impl.MapInputRowParser;
import org.apache.druid.data.input.impl.TimestampSpec;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.emitter.EmittingLogger;

|
|||
import com.google.common.base.Suppliers;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.druid.client.cache.CachePopulatorStats;
|
||||
import org.apache.druid.client.cache.MapCache;
|
||||
import org.apache.druid.data.input.Committer;
|
||||
|
@ -37,6 +35,7 @@ import org.apache.druid.data.input.impl.StringInputRowParser;
|
|||
import org.apache.druid.data.input.impl.TimestampSpec;
|
||||
import org.apache.druid.jackson.DefaultObjectMapper;
|
||||
import org.apache.druid.java.util.common.DateTimes;
|
||||
import org.apache.druid.java.util.common.FileUtils;
|
||||
import org.apache.druid.java.util.common.Intervals;
|
||||
import org.apache.druid.java.util.common.concurrent.Execs;
|
||||
import org.apache.druid.java.util.common.granularity.Granularities;
|
||||
|
@ -129,7 +128,7 @@ public class RealtimePlumberSchoolTest
|
|||
@Before
|
||||
public void setUp() throws Exception
|
||||
{
|
||||
tmpDir = Files.createTempDir();
|
||||
tmpDir = FileUtils.createTempDir();
|
||||
|
||||
ObjectMapper jsonMapper = new DefaultObjectMapper();
|
||||
|
||||
|
|
|
@@ -24,8 +24,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.google.common.collect.ImmutableMap;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.FileUtils.FileCopyResult;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils;

@@ -185,7 +185,7 @@ public class SegmentManagerThreadSafetyTest
    );
    final String storageDir = DataSegmentPusher.getDefaultStorageDir(tmpSegment, false);
    final File segmentDir = new File(segmentDeepStorageDir, storageDir);
-    FileUtils.forceMkdir(segmentDir);
+    org.apache.commons.io.FileUtils.forceMkdir(segmentDir);

    final File factoryJson = new File(segmentDir, "factory.json");
    objectMapper.writeValue(factoryJson, new TestSegmentizerFactory());

@@ -203,7 +203,7 @@ public class SegmentManagerThreadSafetyTest
  {
    numFileLoaded.compute(sourceFile, (f, numLoaded) -> numLoaded == null ? 1 : numLoaded + 1);
    try {
-      FileUtils.copyDirectory(sourceFile, dir);
+      org.apache.commons.io.FileUtils.copyDirectory(sourceFile, dir);
    }
    catch (IOException e) {
      throw new RuntimeException(e);

@@ -29,9 +29,9 @@ import io.tesla.aether.Repository;
import io.tesla.aether.TeslaAether;
import io.tesla.aether.guice.RepositorySystemSessionProvider;
import io.tesla.aether.internal.DefaultTeslaAether;
-import org.apache.commons.io.FileUtils;
import org.apache.druid.guice.ExtensionsConfig;
import org.apache.druid.indexing.common.config.TaskConfig;
+import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.logger.Logger;

@@ -273,8 +273,8 @@ public class PullDependencies implements Runnable
        FileUtils.deleteDirectory(extensionsDir);
        FileUtils.deleteDirectory(hadoopDependenciesDir);
      }
-      FileUtils.forceMkdir(extensionsDir);
-      FileUtils.forceMkdir(hadoopDependenciesDir);
+      org.apache.commons.io.FileUtils.forceMkdir(extensionsDir);
+      org.apache.commons.io.FileUtils.forceMkdir(hadoopDependenciesDir);
    }
    catch (IOException e) {
      log.error(e, "Unable to clear or create extension directory at [%s]", extensionsDir);

@@ -402,7 +402,7 @@ public class PullDependencies implements Runnable
    for (Artifact artifact : artifacts) {
      if (!EXCLUSIONS.contains(artifact.getGroupId())) {
        log.info("Adding file [%s] at [%s]", artifact.getFile().getName(), toLocation.getAbsolutePath());
-        FileUtils.copyFileToDirectory(artifact.getFile(), toLocation);
+        org.apache.commons.io.FileUtils.copyFileToDirectory(artifact.getFile(), toLocation);
      } else {
        log.debug("Skipped Artifact[%s]", artifact);
      }