LUCENE-5951: try to detect if index is on an SSD and default CMS's settings accordingly

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1646775 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2014-12-19 15:36:55 +00:00
parent 8908328de4
commit 33410e30c1
11 changed files with 666 additions and 67 deletions

View File

@ -172,6 +172,11 @@ Optimizations
TokenStream with MemoryIndex when it comes from TokenSources (term vectors) with offsets and
positions. (David Smiley)
* LUCENE-5951: ConcurrentMergeScheduler detects whether the index is on SSD or not
and does a better job defaulting its settings. This only works on Linux for now;
other OS's will continue to use the previous defaults (tuned for spinning disks).
(Robert Muir, Uwe Schindler, hossman, Mike McCandless)
API Changes
* LUCENE-5900: Deprecated more constructors taking Version in *InfixSuggester and

View File

@ -24,7 +24,6 @@ import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.utils.Config;
@ -35,8 +34,8 @@ import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeScheduler;
@ -102,8 +101,9 @@ public class CreateIndexTask extends PerfTask {
iwConf.setOpenMode(mode);
IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config);
iwConf.setIndexDeletionPolicy(indexDeletionPolicy);
if(commit != null)
if (commit != null) {
iwConf.setIndexCommit(commit);
}
final String mergeScheduler = config.get("merge.scheduler",
@ -119,8 +119,8 @@ public class CreateIndexTask extends PerfTask {
if (mergeScheduler.equals("org.apache.lucene.index.ConcurrentMergeScheduler")) {
ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) iwConf.getMergeScheduler();
int maxThreadCount = config.get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.DEFAULT_MAX_THREAD_COUNT);
int maxMergeCount = config.get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.DEFAULT_MAX_MERGE_COUNT);
int maxThreadCount = config.get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
int maxMergeCount = config.get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
}
}

View File

@ -17,14 +17,15 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.CollectionUtil;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.ThreadInterruptedException;
/** A {@link MergeScheduler} that runs each merge using a
* separate thread.
@ -41,36 +42,44 @@ import java.util.Comparator;
* requested then this class will forcefully throttle the
incoming threads by pausing until one or more merges
* complete.</p>
*
* <p>This class attempts to detect whether the index is
* on rotational storage (traditional hard drive) or not
* (e.g. solid-state disk) and changes the default max merge
* and thread count accordingly. This detection is currently
* Linux-only, and relies on the OS to put the right value
into /sys/block/&lt;dev&gt;/queue/rotational. For all
* other operating systems it currently assumes a rotational
* disk for backwards compatibility. To enable default
* settings for spinning or solid state disks for such
* operating systems, use {@link #setDefaultMaxMergesAndThreads(boolean)}.
*/
public class ConcurrentMergeScheduler extends MergeScheduler {
/** Dynamic default for {@code maxThreadCount} and {@code maxMergeCount},
* used to detect whether the index is backed by an SSD or rotational disk and
* set {@code maxThreadCount} accordingly. If it's an SSD,
* {@code maxThreadCount} is set to {@code max(1, min(3, cpuCoreCount/2))},
* otherwise 1. Note that detection only currently works on
* Linux; other platforms will assume the index is not on an SSD. */
public static final int AUTO_DETECT_MERGES_AND_THREADS = -1;
private int mergeThreadPriority = -1;
/** List of currently active {@link MergeThread}s. */
protected final List<MergeThread> mergeThreads = new ArrayList<>();
/**
* Default {@code maxThreadCount}.
* We default to 1: tests on spinning-magnet drives showed slower
* indexing performance if more than one merge thread runs at
* once (though on an SSD it was faster)
*/
public static final int DEFAULT_MAX_THREAD_COUNT = 1;
/** Default {@code maxMergeCount}. */
public static final int DEFAULT_MAX_MERGE_COUNT = 2;
// Max number of merge threads allowed to be running at
// once. When there are more merges than this, we
// forcefully pause the larger ones, letting the smaller
// ones run, up until maxMergeCount merges at which point
// we forcefully pause incoming threads (that presumably
// are the ones causing so much merging).
private int maxThreadCount = DEFAULT_MAX_THREAD_COUNT;
private int maxThreadCount = AUTO_DETECT_MERGES_AND_THREADS;
// Max number of merges we accept before forcefully
// throttling the incoming threads
private int maxMergeCount = DEFAULT_MAX_MERGE_COUNT;
private int maxMergeCount = AUTO_DETECT_MERGES_AND_THREADS;
/** {@link Directory} that holds the index. */
protected Directory dir;
@ -88,7 +97,8 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
}
/**
* Sets the maximum number of merge threads and simultaneous merges allowed.
* Expert: directly set the maximum number of merge threads and
* simultaneous merges allowed.
*
* @param maxMergeCount the max # simultaneous merges that are allowed.
* If a merge is necessary yet we already have this many
@ -99,7 +109,16 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
* @param maxThreadCount the max # simultaneous merge threads that should
* be running at once. This must be &lt;= <code>maxMergeCount</code>
*/
public void setMaxMergesAndThreads(int maxMergeCount, int maxThreadCount) {
public synchronized void setMaxMergesAndThreads(int maxMergeCount, int maxThreadCount) {
if (maxMergeCount == AUTO_DETECT_MERGES_AND_THREADS && maxThreadCount == AUTO_DETECT_MERGES_AND_THREADS) {
// OK
this.maxMergeCount = AUTO_DETECT_MERGES_AND_THREADS;
this.maxThreadCount = AUTO_DETECT_MERGES_AND_THREADS;
} else if (maxMergeCount == AUTO_DETECT_MERGES_AND_THREADS) {
throw new IllegalArgumentException("both maxMergeCount and maxThreadCount must be AUTO_DETECT_MERGES_AND_THREADS");
} else if (maxThreadCount == AUTO_DETECT_MERGES_AND_THREADS) {
throw new IllegalArgumentException("both maxMergeCount and maxThreadCount must be AUTO_DETECT_MERGES_AND_THREADS");
} else {
if (maxThreadCount < 1) {
throw new IllegalArgumentException("maxThreadCount should be at least 1");
}
@ -112,16 +131,33 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
this.maxThreadCount = maxThreadCount;
this.maxMergeCount = maxMergeCount;
}
}
/** Sets max merges and threads to proper defaults for rotational
 * or non-rotational storage.
 *
 * @param spins true to set defaults best for traditional rotational storage (spinning disks),
 * else false (e.g. for solid-state disks)
 */
public synchronized void setDefaultMaxMergesAndThreads(boolean spins) {
if (spins) {
// Spinning disk: concurrent merges compete for the single disk head, so run one merge at a time.
maxThreadCount = 1;
maxMergeCount = 2;
} else {
// SSD: concurrent I/O is cheap, so scale merge threads with CPU cores, clamped to [1, 3].
maxThreadCount = Math.max(1, Math.min(3, Runtime.getRuntime().availableProcessors()/2));
// Allow two more queued merges than running threads before throttling incoming threads.
maxMergeCount = maxThreadCount+2;
}
}
/** Returns {@code maxThreadCount}.
*
* @see #setMaxMergesAndThreads(int, int) */
public int getMaxThreadCount() {
public synchronized int getMaxThreadCount() {
return maxThreadCount;
}
/** See {@link #setMaxMergesAndThreads}. */
public int getMaxMergeCount() {
public synchronized int getMaxMergeCount() {
return maxMergeCount;
}
@ -257,6 +293,17 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
}
}
/** Resolves {@code AUTO_DETECT_MERGES_AND_THREADS} into concrete limits the first
 * time the scheduler is attached to a writer, by asking {@link IOUtils#spins(Directory)}
 * whether the index directory appears to be on rotational storage. Checking only
 * {@code maxThreadCount} suffices: the setter guarantees both fields are auto together. */
private synchronized void initMaxMergesAndThreads() throws IOException {
if (maxThreadCount == AUTO_DETECT_MERGES_AND_THREADS) {
// caller assigns writer before invoking this
assert writer != null;
boolean spins = IOUtils.spins(writer.getDirectory());
setDefaultMaxMergesAndThreads(spins);
if (verbose()) {
message("initMaxMergesAndThreads spins=" + spins + " maxThreadCount=" + maxThreadCount + " maxMergeCount=" + maxMergeCount);
}
}
}
@Override
public void close() {
sync();
@ -318,6 +365,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
this.writer = writer;
initMergeThreadPriority();
initMaxMergesAndThreads();
dir = writer.getDirectory();

View File

@ -126,7 +126,7 @@ public abstract class FSDirectory extends BaseDirectory {
*/
protected FSDirectory(Path path, LockFactory lockFactory) throws IOException {
super(lockFactory);
Files.createDirectories(path); // create directory, if it doesnt exist
Files.createDirectories(path); // create directory, if it doesn't exist
directory = path.toRealPath();
}

View File

@ -17,8 +17,6 @@ package org.apache.lucene.util;
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
@ -30,6 +28,7 @@ import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileStore;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
@ -41,6 +40,12 @@ import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.RAMDirectory;
/** This class emulates the new Java 7 "Try-With-Resources" statement.
* Remove once Lucene is on Java 7.
* @lucene.internal */
@ -412,4 +417,129 @@ public final class IOUtils {
// Throw original exception
throw exc;
}
/** If the dir is an {@link FSDirectory} or wraps one via possibly
 * nested {@link FilterDirectory} or {@link FileSwitchDirectory},
 * this returns {@link #spins(Path)} for the wrapped directory,
 * else, true.
 *
 * @throws IOException if {@code path} does not exist.
 *
 * @lucene.internal */
public static boolean spins(Directory dir) throws IOException {
Directory unwrapped = FilterDirectory.unwrap(dir);
if (unwrapped instanceof FileSwitchDirectory) {
FileSwitchDirectory switchDir = (FileSwitchDirectory) unwrapped;
// Spinning is contagious: if either side spins, report spinning.
return spins(switchDir.getPrimaryDir()) || spins(switchDir.getSecondaryDir());
}
if (unwrapped instanceof RAMDirectory) {
// heap-resident index: never spinning storage
return false;
}
if (unwrapped instanceof FSDirectory) {
return spins(((FSDirectory) unwrapped).getDirectory());
}
// unknown Directory impl: conservatively assume rotational
return true;
}
/** Rough Linux-only heuristics to determine whether the provided
 * {@code Path} is backed by spinning storage. For example, this
 * returns false if the disk is a solid-state disk.
 *
 * @param path a location to check which must exist. the mount point will be determined from this location.
 * @return false if the storage is non-rotational (e.g. an SSD), or true if it is spinning or could not be determined
 * @throws IOException if {@code path} does not exist.
 *
 * @lucene.internal */
public static boolean spins(Path path) throws IOException {
// resolve symlinks first (before the OS check) so the "throws if path does
// not exist" contract holds on every platform, not just Linux
path = path.toRealPath();
// Super cowboy approach, but seems to work!
if (!Constants.LINUX) {
return true; // no detection on other OSes: conservatively assume spinning
}
try {
return spinsLinux(path);
} catch (Exception exc) {
// the heuristics can easily trigger SecurityException, AIOOBE, etc.;
// on any failure fall back to the safe "spinning" answer
return true;
}
}
// following methods are package-private for testing ONLY
// note: requires a real or fake linux filesystem!
/** Linux detection: maps {@code path} to its block device, then reads
 * {@code /sys/block/&lt;dev&gt;/queue/rotational} — '1' means spinning.
 * /sys and /dev are resolved against {@code path.getRoot()} so tests can
 * present chrooted fakes of those trees. */
static boolean spinsLinux(Path path) throws IOException {
FileStore store = getFileStore(path);
// if fs type is tmpfs, it doesn't spin.
// this won't have a corresponding block device
if ("tmpfs".equals(store.type())) {
return false;
}
// get block device name
String devName = getBlockDevice(store);
// not a device (e.g. NFS server reports "server:/mount") -> assume spinning
if (!devName.startsWith("/")) {
return true;
}
// resolve any symlinks to real block device (e.g. LVM)
// /dev/sda0 -> sda0
// /devices/XXX -> sda0
devName = path.getRoot().resolve(devName).toRealPath().getFileName().toString();
// now read:
Path sysinfo = path.getRoot().resolve("sys/block");
Path devinfo = sysinfo.resolve(devName);
// tear away trailing partition digits (sda1 -> sda) until a /sys/block entry exists
while (!Files.exists(devinfo)) {
if (!devName.isEmpty() && Character.isDigit(devName.charAt(devName.length()-1))) {
devName = devName.substring(0, devName.length()-1);
} else {
break; // give up
}
devinfo = sysinfo.resolve(devName);
}
// read first byte from rotational: it's a '1' if the device spins
Path info = devinfo.resolve("queue/rotational");
try (InputStream stream = Files.newInputStream(info)) {
return stream.read() == '1';
}
}
/** Resolves the {@link FileStore} for {@code path} by matching its mount point
 * against the file system's full store list. The store returned by
 * {@code Files.getFileStore(Path)} alone is not detailed enough here —
 * don't complain, just try it yourself. */
static FileStore getFileStore(Path path) throws IOException {
FileStore reported = Files.getFileStore(path);
String mountPoint = getMountPoint(reported);
// scan all stores the filesystem knows; the one sharing our mount point is the real one
for (FileStore candidate : path.getFileSystem().getFileStores()) {
if (mountPoint.equals(getMountPoint(candidate))) {
return candidate;
}
}
// no match: fall back to the (less informative) store Files gave us
return reported;
}
/** Extracts the mount point from a FileStore's toString(), which on Linux
 * looks like {@code "/mount/point (/dev/sda1)"}. This relies on undocumented
 * JDK formatting and is not guaranteed. */
static String getMountPoint(FileStore store) {
String description = store.toString();
int parenIndex = description.lastIndexOf('(');
// drop the trailing " (device)" suffix, including the space before '('
return description.substring(0, parenIndex - 1);
}
/** Extracts the block device (e.g. {@code /dev/sda1}) from a FileStore's
 * toString(), formatted as {@code "/mount/point (device)"}. This relies on
 * undocumented JDK formatting and is not guaranteed. */
static String getBlockDevice(FileStore store) {
String description = store.toString();
int open = description.lastIndexOf('(');
int close = description.indexOf(')', open);
// return everything between the final pair of parentheses
return description.substring(open + 1, close);
}
}

View File

@ -394,6 +394,22 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
d.close();
}
/** setMaxMergesAndThreads must reject AUTO_DETECT_MERGES_AND_THREADS for only one
 * of the two arguments: auto-detection is all-or-nothing. */
public void testInvalidMaxMergeCountAndThreads() throws Exception {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
try {
cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 3);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// good
}
try {
cms.setMaxMergesAndThreads(3, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// good
}
}
public void testLiveMaxMergeCount() throws Exception {
Directory d = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
@ -429,6 +445,9 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
}
};
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
cms.setMaxMergesAndThreads(5, 3);
iwc.setMergeScheduler(cms);
@ -550,4 +569,75 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
w.rollback();
dir.close();
}
/** A fresh CMS starts in auto-detect mode; once merges actually run, it must have
 * replaced the AUTO_DETECT sentinels with concrete thread/merge limits. */
public void testDynamicDefaults() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// before any writer is attached, both limits are the sentinel
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
iwc.setMergeScheduler(cms);
iwc.setMaxBufferedDocs(2);
LogMergePolicy lmp = newLogMergePolicy();
lmp.setMergeFactor(2);
iwc.setMergePolicy(lmp);
IndexWriter w = new IndexWriter(dir, iwc);
w.addDocument(new Document());
w.addDocument(new Document());
// flush
w.addDocument(new Document());
w.addDocument(new Document());
// flush + merge
// CMS should have now set true values:
assertTrue(cms.getMaxMergeCount() != ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
assertTrue(cms.getMaxThreadCount() != ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
w.close();
dir.close();
}
/** Explicit limits can be reset back to auto-detect, but only by passing the
 * sentinel for BOTH arguments; mixing a sentinel with a concrete value throws. */
public void testResetToAutoDefault() throws Exception {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// fresh scheduler starts in auto-detect mode
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
cms.setMaxMergesAndThreads(4, 3);
assertEquals(4, cms.getMaxMergeCount());
assertEquals(3, cms.getMaxThreadCount());
try {
cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 4);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// expected
}
try {
cms.setMaxMergesAndThreads(4, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// expected
}
// both sentinels together is the valid way to re-enable auto-detect
cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
}
/** setDefaultMaxMergesAndThreads(true) (spinning-disk defaults) must yield one
 * merge thread and two max merges. */
public void testSpinningDefaults() throws Exception {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
cms.setDefaultMaxMergesAndThreads(true);
assertEquals(1, cms.getMaxThreadCount());
assertEquals(2, cms.getMaxMergeCount());
}
/** setDefaultMaxMergesAndThreads(false) (SSD defaults) must pick a thread count
 * in [1, 3] and a max merge count of threadCount + 2. */
public void testNonSpinningDefaults() throws Exception {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
cms.setDefaultMaxMergesAndThreads(false);
int threadCount = cms.getMaxThreadCount();
// SSD default is max(1, min(3, cores/2))
assertTrue(threadCount >= 1);
assertTrue(threadCount <= 3);
// JUnit convention: expected value first, actual second (was reversed before)
assertEquals(2+threadCount, cms.getMaxMergeCount());
}
}

View File

@ -17,8 +17,27 @@ package org.apache.lucene.util;
* limitations under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.AccessMode;
import java.nio.file.FileStore;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import org.apache.lucene.mockfile.FilterFileStore;
import org.apache.lucene.mockfile.FilterFileSystem;
import org.apache.lucene.mockfile.FilterFileSystemProvider;
import org.apache.lucene.mockfile.FilterPath;
/** Simple test methods for IOUtils */
public class TestIOUtils extends LuceneTestCase {
@ -78,4 +97,281 @@ public class TestIOUtils extends LuceneTestCase {
// no exception
// actually deletes file2
}
/** spins(Path) must accept existing files and directories without throwing,
 * and must throw IOException for a nonexistent path. */
public void testSpinsBasics() throws Exception {
Path dir = createTempDir();
// no exception, directory exists
IOUtils.spins(dir);
Path file = dir.resolve("exists");
Files.createFile(file);
// no exception, file exists
IOUtils.spins(file);
// exception: file doesn't exist
Path fake = dir.resolve("nonexistent");
try {
IOUtils.spins(fake);
fail();
} catch (IOException expected) {
// ok
}
}
// fake up a filestore to test some underlying methods
/** FileStore stub that overrides only {@code toString()} (the "mount (device)"
 * description IOUtils parses) and {@code type()}; everything else delegates. */
static class MockFileStore extends FilterFileStore {
// the "mount (device)" string returned by toString()
final String description;
// filesystem type returned by type(), e.g. "tmpfs" or "nfs"
final String type;
MockFileStore(FileStore delegate, String description) {
this(delegate, description, "mockfs");
}
MockFileStore(FileStore delegate, String description, String type) {
super(delegate, "justafake://");
this.description = description;
this.type = type;
}
@Override
public String type() {
return type;
}
@Override
public String toString() {
return description;
}
}
/** getBlockDevice must return the content of the final parenthesized group,
 * even when the mount path itself contains spaces and parentheses. */
public void testGetBlockDevice() throws Exception {
Path dir = createTempDir();
FileStore actual = Files.getFileStore(dir);
assertEquals("/dev/sda1", IOUtils.getBlockDevice(new MockFileStore(actual, "/ (/dev/sda1)")));
assertEquals("/dev/sda1", IOUtils.getBlockDevice(new MockFileStore(actual, "/test/ space(((trash)))/ (/dev/sda1)")));
assertEquals("notreal", IOUtils.getBlockDevice(new MockFileStore(actual, "/ (notreal)")));
}
/** getMountPoint must return everything before the final " (device)" suffix,
 * even when the mount path itself contains spaces and parentheses. */
public void testGetMountPoint() throws Exception {
Path dir = createTempDir();
FileStore actual = Files.getFileStore(dir);
assertEquals("/", IOUtils.getMountPoint(new MockFileStore(actual, "/ (/dev/sda1)")));
assertEquals("/test/ space(((trash)))/", IOUtils.getMountPoint(new MockFileStore(actual, "/test/ space(((trash)))/ (/dev/sda1)")));
assertEquals("/", IOUtils.getMountPoint(new MockFileStore(actual, "/ (notreal)")));
}
/** mock linux that takes mappings of test files, to their associated filesystems.
 * it will chroot /dev and /sys requests to root, so you can mock those too.
 * <p>
 * It is hacky by definition, so don't try putting it around a complex chain or anything.
 * Use FilterPath.unwrap
 */
static class MockLinuxFileSystemProvider extends FilterFileSystemProvider {
// absolute path string -> the FileStore getFileStore(Path) should report for it
final Map<String,FileStore> filesToStore;
// directory that stands in for "/" when chrooting /sys and /dev lookups
final Path root;
public MockLinuxFileSystemProvider(FileSystem delegateInstance, final Map<String,FileStore> filesToStore, Path root) {
// uses the two-arg super: we must assign the singleton fileSystem ourselves below
super("mocklinux://", delegateInstance);
final Collection<FileStore> allStores = new HashSet<>(filesToStore.values());
this.fileSystem = new FilterFileSystem(this, delegateInstance) {
@Override
public Iterable<FileStore> getFileStores() {
// expose exactly the mock stores so IOUtils.getFileStore can scan them
return allStores;
}
@Override
public Path getPath(String first, String... more) {
// wrap paths so toRealPath() goes through our chroot logic
return new MockLinuxPath(super.getPath(first, more), this);
}
};
this.filesToStore = filesToStore;
this.root = root;
}
@Override
public FileStore getFileStore(Path path) throws IOException {
FileStore ret = filesToStore.get(path.toString());
if (ret == null) {
throw new IllegalArgumentException("this mock doesnt know wtf to do with: " + path);
}
// act like the linux fs provider here, return a crazy rootfs one
if (ret.toString().startsWith(root + " (")) {
return new MockFileStore(ret, root + " (rootfs)", "rootfs");
}
return ret;
}
// redirect absolute /sys and /dev paths under our fake root; other paths pass through
Path maybeChroot(Path path) {
if (path.toAbsolutePath().startsWith("/sys") || path.toAbsolutePath().startsWith("/dev")) {
// map to our chrooted location;
return path.getRoot().resolve(root).resolve(path.toString().substring(1));
} else {
return path;
}
}
@Override
public void checkAccess(Path path, AccessMode... modes) throws IOException {
// TODO: kinda screwed up how we do this, but its easy to get lost. just unravel completely.
delegate.checkAccess(FilterPath.unwrap(path), modes);
}
@Override
public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
// chroot so reads of e.g. /sys/block/... hit our fake files
return super.newInputStream(maybeChroot(path), options);
}
/** Path wrapper whose toRealPath() honors the /sys and /dev chroot mapping. */
class MockLinuxPath extends FilterPath {
MockLinuxPath(Path delegate, FileSystem fileSystem) {
super(delegate, fileSystem);
}
@Override
public Path toRealPath(LinkOption... options) throws IOException {
Path p = maybeChroot(this);
if (p == this) {
return super.toRealPath(options);
} else {
return p.toRealPath(options);
}
}
@Override
protected Path wrap(Path other) {
// keep derived paths (resolve, normalize, ...) inside the mock
return new MockLinuxPath(other, fileSystem);
}
}
}
/** IOUtils.getFileStore must recover the precise mock store (by mount-point
 * matching) even when Files.getFileStore reports a less specific one, as the
 * real Linux provider does for the root filesystem. */
public void testGetFileStore() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// now we can create some fake mount points:
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (/dev/sda1)");
FileStore usr = new MockFileStore(Files.getFileStore(dir), dir.resolve("usr").toString() + " (/dev/sda2)");
// associate some preset files to these
Map<String,FileStore> mappings = new HashMap<>();
mappings.put(dir.toString(), root);
mappings.put(dir.resolve("foo.txt").toString(), root);
mappings.put(dir.resolve("usr").toString(), usr);
mappings.put(dir.resolve("usr/bar.txt").toString(), usr);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
// sanity check our mock:
assertSame(usr, Files.getFileStore(mockPath.resolve("usr")));
assertSame(usr, Files.getFileStore(mockPath.resolve("usr/bar.txt")));
// for root filesystem we get a crappy one (the mock mimics linux's "rootfs" answer)
assertNotSame(root, Files.getFileStore(mockPath));
assertNotSame(usr, Files.getFileStore(mockPath));
assertNotSame(root, Files.getFileStore(mockPath.resolve("foo.txt")));
assertNotSame(usr, Files.getFileStore(mockPath.resolve("foo.txt")));
// now test our method: it must find the precise store in every case
assertSame(usr, IOUtils.getFileStore(mockPath.resolve("usr")));
assertSame(usr, IOUtils.getFileStore(mockPath.resolve("usr/bar.txt")));
assertSame(root, IOUtils.getFileStore(mockPath));
assertSame(root, IOUtils.getFileStore(mockPath.resolve("foo.txt")));
}
/** A tmpfs filesystem is RAM-backed and has no block device: spinsLinux must
 * report non-spinning based on the fs type alone. */
public void testTmpfsDoesntSpin() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// fake tmpfs
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (/dev/sda1)", "tmpfs");
Map<String,FileStore> mappings = Collections.singletonMap(dir.toString(), root);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
assertFalse(IOUtils.spinsLinux(mockPath));
}
/** An NFS mount has no local block device (device string is "server:/mount",
 * not "/dev/..."), so spinsLinux must conservatively report spinning. */
public void testNfsSpins() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// fake nfs
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (somenfsserver:/some/mount)", "nfs");
Map<String,FileStore> mappings = Collections.singletonMap(dir.toString(), root);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
assertTrue(IOUtils.spinsLinux(mockPath));
}
/** With rotational=0 in the fake sysfs tree, spinsLinux must report an SSD. */
public void testSSD() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// fake ssd
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (/dev/zzz1)");
// make a fake /dev/zzz1 for it (the mock chroots /dev under dir)
Path devdir = dir.resolve("dev");
Files.createDirectories(devdir);
Files.createFile(devdir.resolve("zzz1"));
// make a fake /sys/block/zzz/queue/rotational file for it; note the sysfs
// entry is for "zzz" (partition digit stripped), not "zzz1"
Path sysdir = dir.resolve("sys").resolve("block").resolve("zzz").resolve("queue");
Files.createDirectories(sysdir);
try (OutputStream o = Files.newOutputStream(sysdir.resolve("rotational"))) {
o.write("0\n".getBytes(StandardCharsets.US_ASCII));
}
Map<String,FileStore> mappings = Collections.singletonMap(dir.toString(), root);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
assertFalse(IOUtils.spinsLinux(mockPath));
}
/** With rotational=1 in the fake sysfs tree, spinsLinux must report spinning. */
public void testRotatingPlatters() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// fake spinning disk (rotational=1), unlike testSSD above
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (/dev/zzz1)");
// make a fake /dev/zzz1 for it
Path devdir = dir.resolve("dev");
Files.createDirectories(devdir);
Files.createFile(devdir.resolve("zzz1"));
// make a fake /sys/block/zzz/queue/rotational file for it
Path sysdir = dir.resolve("sys").resolve("block").resolve("zzz").resolve("queue");
Files.createDirectories(sysdir);
try (OutputStream o = Files.newOutputStream(sysdir.resolve("rotational"))) {
o.write("1\n".getBytes(StandardCharsets.US_ASCII));
}
Map<String,FileStore> mappings = Collections.singletonMap(dir.toString(), root);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
assertTrue(IOUtils.spinsLinux(mockPath));
}
/** spinsLinux must strip multiple trailing partition digits (zzz12 -> zzz)
 * to locate the sysfs entry for the underlying device. */
public void testManyPartitions() throws Exception {
Path dir = createTempDir();
dir = FilterPath.unwrap(dir).toRealPath();
// fake ssd with a two-digit partition number
FileStore root = new MockFileStore(Files.getFileStore(dir), dir.toString() + " (/dev/zzz12)");
// make a fake /dev/zzz12 for it
Path devdir = dir.resolve("dev");
Files.createDirectories(devdir);
Files.createFile(devdir.resolve("zzz12"));
// make a fake /sys/block/zzz/queue/rotational file for it
Path sysdir = dir.resolve("sys").resolve("block").resolve("zzz").resolve("queue");
Files.createDirectories(sysdir);
try (OutputStream o = Files.newOutputStream(sysdir.resolve("rotational"))) {
o.write("0\n".getBytes(StandardCharsets.US_ASCII));
}
Map<String,FileStore> mappings = Collections.singletonMap(dir.toString(), root);
FileSystem mockLinux = new MockLinuxFileSystemProvider(dir.getFileSystem(), mappings, dir).getFileSystem(null);
Path mockPath = mockLinux.getPath(dir.toString());
assertFalse(IOUtils.spinsLinux(mockPath));
}
}

View File

@ -59,12 +59,12 @@ public class FilterFileStore extends FileStore {
@Override
public String type() {
return scheme + "(" + delegate.type() + ")";
return delegate.type();
}
@Override
public String toString() {
return scheme + "(" + delegate.toString() + ")";
return delegate.toString();
}
@Override

View File

@ -59,7 +59,7 @@ public class FilterFileSystemProvider extends FileSystemProvider {
/**
* The underlying {@code FileSystem} instance.
*/
protected final FileSystem fileSystem;
protected FileSystem fileSystem;
/**
* The URI scheme for this provider.
*/
@ -79,6 +79,18 @@ public class FilterFileSystemProvider extends FileSystemProvider {
this.fileSystem = new FilterFileSystem(this, delegateInstance);
}
/**
 * Construct a {@code FilterFileSystemProvider} indicated by
 * the specified {@code scheme} and wrapping functionality of the
 * provider. Unlike the other constructor, this does NOT create the
 * singleton filesystem: you must assign {@code fileSystem} yourself
 * before any filesystem-returning method is called.
 * @param scheme URI scheme
 * @param delegate specified base provider.
 */
public FilterFileSystemProvider(String scheme, FileSystemProvider delegate) {
this.scheme = Objects.requireNonNull(scheme);
this.delegate = Objects.requireNonNull(delegate);
}
@Override
public String getScheme() {
return scheme;
@ -86,21 +98,33 @@ public class FilterFileSystemProvider extends FileSystemProvider {
@Override
public FileSystem newFileSystem(URI uri, Map<String,?> env) throws IOException {
// subclasses using the two-arg constructor must have assigned the singleton by now
if (fileSystem == null) {
throw new IllegalStateException("subclass did not initialize singleton filesystem");
}
return fileSystem;
}
@Override
public FileSystem newFileSystem(Path path, Map<String,?> env) throws IOException {
// subclasses using the two-arg constructor must have assigned the singleton by now
if (fileSystem == null) {
throw new IllegalStateException("subclass did not initialize singleton filesystem");
}
return fileSystem;
}
@Override
public FileSystem getFileSystem(URI uri) {
// subclasses using the two-arg constructor must have assigned the singleton by now
if (fileSystem == null) {
throw new IllegalStateException("subclass did not initialize singleton filesystem");
}
return fileSystem;
}
@Override
public Path getPath(URI uri) {
// subclasses using the two-arg constructor must have assigned the singleton by now
if (fileSystem == null) {
throw new IllegalStateException("subclass did not initialize singleton filesystem");
}
Path path = delegate.getPath(toDelegate(uri));
// wrap so subsequent Path operations stay within this filter provider
return new FilterPath(path, fileSystem);
}

View File

@ -82,7 +82,7 @@ public class FilterPath implements Path {
if (root == null) {
return null;
}
return new FilterPath(root, fileSystem);
return wrap(root);
}
@Override
@ -91,7 +91,7 @@ public class FilterPath implements Path {
if (fileName == null) {
return null;
}
return new FilterPath(fileName, fileSystem);
return wrap(fileName);
}
@Override
@ -100,7 +100,7 @@ public class FilterPath implements Path {
if (parent == null) {
return null;
}
return new FilterPath(parent, fileSystem);
return wrap(parent);
}
@Override
@ -110,12 +110,12 @@ public class FilterPath implements Path {
@Override
public Path getName(int index) {
return new FilterPath(delegate.getName(index), fileSystem);
return wrap(delegate.getName(index));
}
@Override
public Path subpath(int beginIndex, int endIndex) {
return new FilterPath(delegate.subpath(beginIndex, endIndex), fileSystem);
return wrap(delegate.subpath(beginIndex, endIndex));
}
@Override
@ -148,7 +148,7 @@ public class FilterPath implements Path {
@Override
public Path normalize() {
return new FilterPath(delegate.normalize(), fileSystem);
return wrap(delegate.normalize());
}
@Override
@ -156,12 +156,12 @@ public class FilterPath implements Path {
if (other instanceof FilterPath) {
other = ((FilterPath)other).delegate;
}
return new FilterPath(delegate.resolve(other), fileSystem);
return wrap(delegate.resolve(other));
}
@Override
public Path resolve(String other) {
return new FilterPath(delegate.resolve(other), fileSystem);
return wrap(delegate.resolve(other));
}
@Override
@ -169,12 +169,12 @@ public class FilterPath implements Path {
if (other instanceof FilterPath) {
other = ((FilterPath)other).delegate;
}
return new FilterPath(delegate.resolveSibling(other), fileSystem);
return wrap(delegate.resolveSibling(other));
}
@Override
public Path resolveSibling(String other) {
return new FilterPath(delegate.resolveSibling(other), fileSystem);
return wrap(delegate.resolveSibling(other));
}
@Override
@ -182,7 +182,7 @@ public class FilterPath implements Path {
if (other instanceof FilterPath) {
other = ((FilterPath)other).delegate;
}
return new FilterPath(delegate.relativize(other), fileSystem);
return wrap(delegate.relativize(other));
}
// TODO: should these methods not expose delegate result directly?
@ -200,12 +200,12 @@ public class FilterPath implements Path {
@Override
public Path toAbsolutePath() {
return new FilterPath(delegate.toAbsolutePath(), fileSystem);
return wrap(delegate.toAbsolutePath());
}
@Override
public Path toRealPath(LinkOption... options) throws IOException {
return new FilterPath(delegate.toRealPath(options), fileSystem);
return wrap(delegate.toRealPath(options));
}
@Override
@ -235,7 +235,7 @@ public class FilterPath implements Path {
@Override
public Path next() {
return new FilterPath(iterator.next(), fileSystem);
return wrap(iterator.next());
}
@Override
@ -267,4 +267,10 @@ public class FilterPath implements Path {
}
return path;
}
/** Override this to customize the return wrapped
* path from various operations */
protected Path wrap(Path other) {
return new FilterPath(other, fileSystem);
}
}

View File

@ -57,8 +57,8 @@ import java.util.logging.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
@ -77,8 +77,8 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
@ -105,8 +105,8 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.DocIdSet;
@ -120,12 +120,12 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FSLockFactory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.store.RateLimitedDirectoryWrapper;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
@ -153,16 +153,16 @@ import com.carrotsearch.randomizedtesting.annotations.Listeners;
import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
@ -906,8 +906,6 @@ public abstract class LuceneTestCase extends Assert {
if (r.nextBoolean()) {
c.setMergeScheduler(new SerialMergeScheduler());
} else if (rarely(r)) {
int maxThreadCount = TestUtil.nextInt(r, 1, 4);
int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount + 4);
ConcurrentMergeScheduler cms;
if (r.nextBoolean()) {
cms = new ConcurrentMergeScheduler();
@ -918,6 +916,8 @@ public abstract class LuceneTestCase extends Assert {
}
};
}
int maxThreadCount = TestUtil.nextInt(r, 1, 4);
int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount + 4);
cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
c.setMergeScheduler(cms);
}