mirror of https://github.com/apache/lucene.git
LUCENE-5953: Restructure Directory and LockFactory APIs
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1637665 13f79535-47bb-0310-9956-ffa450edef68
parent f7e6d2f177
commit c429f437f0

@@ -174,6 +174,13 @@ API Changes
 * LUCENE-6021: FixedBitSet.nextSetBit now returns DocIdSetIterator.NO_MORE_DOCS
   instead of -1 when there are no more bits which are set. (Adrien Grand)
 
+* LUCENE-5953: Directory and LockFactory APIs were restructured: Locking is
+  now under the responsibility of the Directory implementation. LockFactory is
+  only used by subclasses of BaseDirectory to delegate locking to an impl
+  class. LockFactories are now singletons and are responsible to create a Lock
+  instance based on a Directory implementation passed to the factory method.
+  See MIGRATE.txt for more details. (Uwe Schindler, Robert Muir)
+
 Bug Fixes
 
 * LUCENE-5650: Enforce read-only access to any path outside the temporary

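Taken together with the file diffs below, the new usage pattern is: pick a LockFactory singleton, hand it to the Directory constructor, and keep obtaining locks through the Directory. A minimal sketch under those assumptions (the index path and lock name are illustrative only):

    import java.nio.file.Paths;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.NativeFSLockFactory;

    public class LockingSketch {
      public static void main(String[] args) throws Exception {
        // Lock factories are singletons now and no longer carry their own lock directory.
        Directory dir = FSDirectory.open(Paths.get("/tmp/index"),      // hypothetical path
                                         NativeFSLockFactory.INSTANCE);
        // Locks are still created through the Directory; BaseDirectory forwards the call
        // to lockFactory.makeLock(this, name) internally.
        Lock lock = dir.makeLock("write.lock");
        try {
          if (lock.obtain()) {
            // ... work while holding the lock ...
          }
        } finally {
          lock.close();
          dir.close();
        }
      }
    }
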
@@ -32,7 +32,6 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.MergeState.CheckAbort;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.BaseDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

@@ -103,7 +102,7 @@ public class SimpleTextCompoundFormat extends CompoundFormat {
       endOffsets[i] = Long.parseLong(stripPrefix(scratch, TABLEEND));
     }
 
-    return new BaseDirectory() {
+    return new Directory() {
 
       private int getIndex(String name) throws IOException {
         int index = Arrays.binarySearch(fileNames, name);

@@ -135,7 +134,6 @@ public class SimpleTextCompoundFormat extends CompoundFormat {
 
       @Override
       public void close() throws IOException {
-        isOpen = false;
         in.close();
       }
 

@@ -155,9 +153,6 @@ public class SimpleTextCompoundFormat extends CompoundFormat {
 
       @Override
       public Lock makeLock(String name) { throw new UnsupportedOperationException(); }
-
-      @Override
-      public void clearLock(String name) { throw new UnsupportedOperationException(); }
     };
   }
 

@@ -21,7 +21,6 @@ import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.BaseDirectory;
 import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;

@@ -43,7 +42,7 @@ import java.io.IOException;
  * Directory methods that would normally modify data throw an exception.
  * @lucene.experimental
  */
-final class Lucene50CompoundReader extends BaseDirectory {
+final class Lucene50CompoundReader extends Directory {
 
   /** Offset/Length for a slice inside of a compound file */
   public static final class FileEntry {

@@ -84,7 +83,6 @@ final class Lucene50CompoundReader extends BaseDirectory {
         IOUtils.closeWhileHandlingException(handle);
       }
     }
-    this.isOpen = true;
   }
 
   /** Helper method that reads CFS entries from an input stream */

@@ -119,7 +117,6 @@ final class Lucene50CompoundReader extends BaseDirectory {
 
   @Override
   public void close() throws IOException {
-    isOpen = false;
     IOUtils.close(handle);
   }
 

@@ -186,11 +183,6 @@ final class Lucene50CompoundReader extends BaseDirectory {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  public void clearLock(String name) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public String toString() {
     return "CompoundFileDirectory(segment=\"" + segmentName + "\" in dir=" + directory + ")";

@@ -17,10 +17,9 @@ package org.apache.lucene.store;
  * limitations under the License.
  */
 
-import java.io.IOException;
 
 /**
- * Base implementation for a concrete {@link Directory}.
+ * Base implementation for a concrete {@link Directory} that uses a {@link LockFactory} for locking.
  * @lucene.experimental
  */
 public abstract class BaseDirectory extends Directory {

@@ -29,35 +28,20 @@ public abstract class BaseDirectory extends Directory {
 
   /** Holds the LockFactory instance (implements locking for
    * this Directory instance). */
-  protected LockFactory lockFactory;
+  protected final LockFactory lockFactory;
 
   /** Sole constructor. */
-  protected BaseDirectory() {
+  protected BaseDirectory(LockFactory lockFactory) {
     super();
-  }
-
-  @Override
-  public Lock makeLock(String name) {
-    return lockFactory.makeLock(name);
-  }
-
-  @Override
-  public void clearLock(String name) throws IOException {
-    if (lockFactory != null) {
-      lockFactory.clearLock(name);
+    if (lockFactory == null) {
+      throw new NullPointerException("LockFactory cannot be null, use an explicit instance!");
     }
-  }
-
-  @Override
-  public void setLockFactory(LockFactory lockFactory) throws IOException {
-    assert lockFactory != null;
     this.lockFactory = lockFactory;
-    lockFactory.setLockPrefix(this.getLockID());
   }
 
   @Override
-  public LockFactory getLockFactory() {
-    return this.lockFactory;
+  public final Lock makeLock(String name) {
+    return lockFactory.makeLock(this, name);
   }
 
   @Override

@@ -66,4 +50,9 @@ public abstract class BaseDirectory extends Directory {
       throw new AlreadyClosedException("this Directory is closed");
   }
 
+  @Override
+  public String toString() {
+    return super.toString() + " lockFactory=" + lockFactory;
+  }
+
 }

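For subclasses this boils down to forwarding a LockFactory to the super constructor; a hypothetical skeleton, not part of the commit itself (class name illustrative):

    import org.apache.lucene.store.BaseDirectory;
    import org.apache.lucene.store.LockFactory;

    // Hypothetical subclass: the lock factory is fixed at construction time, and the
    // final makeLock(name) inherited from BaseDirectory delegates to lockFactory.makeLock(this, name).
    public abstract class MyDirectory extends BaseDirectory {

      protected MyDirectory(LockFactory lockFactory) {
        super(lockFactory);  // must not be null; BaseDirectory throws NullPointerException otherwise
      }

      // listAll(), openInput(), createOutput(), ... still have to be implemented here;
      // makeLock() no longer needs to be (and can no longer be) overridden.
    }
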
@@ -37,8 +37,7 @@ import org.apache.lucene.util.IOUtils;
  * </ul>
  *
  * Directory locking is implemented by an instance of {@link
- * LockFactory}, and can be changed for each Directory
- * instance using {@link #setLockFactory}.
+ * LockFactory}.
  *
  */
 public abstract class Directory implements Closeable {

@@ -116,53 +115,14 @@ public abstract class Directory implements Closeable {
    */
   public abstract Lock makeLock(String name);
 
-  /**
-   * Attempt to clear (forcefully unlock and remove) the
-   * specified lock. Only call this at a time when you are
-   * certain this lock is no longer in use.
-   * @param name name of the lock to be cleared.
-   */
-  public abstract void clearLock(String name) throws IOException;
-
   /** Closes the store. */
   @Override
   public abstract void close()
     throws IOException;
 
-  /**
-   * Set the LockFactory that this Directory instance should
-   * use for its locking implementation. Each * instance of
-   * LockFactory should only be used for one directory (ie,
-   * do not share a single instance across multiple
-   * Directories).
-   *
-   * @param lockFactory instance of {@link LockFactory}.
-   */
-  public abstract void setLockFactory(LockFactory lockFactory) throws IOException;
-
-  /**
-   * Get the LockFactory that this Directory instance is
-   * using for its locking implementation. Note that this
-   * may be null for Directory implementations that provide
-   * their own locking implementation.
-   */
-  public abstract LockFactory getLockFactory();
-
-  /**
-   * Return a string identifier that uniquely differentiates
-   * this Directory instance from other Directory instances.
-   * This ID should be the same if two Directory instances
-   * (even in different JVMs and/or on different machines)
-   * are considered "the same index". This is how locking
-   * "scopes" to the right index.
-   */
-  public String getLockID() {
-    return this.toString();
-  }
-
   @Override
   public String toString() {
-    return getClass().getSimpleName() + '@' + Integer.toHexString(hashCode()) + " lockFactory=" + getLockFactory();
+    return getClass().getSimpleName() + '@' + Integer.toHexString(hashCode());
   }
 
   /**

@@ -125,15 +125,9 @@ public abstract class FSDirectory extends BaseDirectory {
    * @throws IOException if there is a low-level I/O error
    */
   protected FSDirectory(Path path, LockFactory lockFactory) throws IOException {
-    // new ctors use always NativeFSLockFactory as default:
-    if (lockFactory == null) {
-      lockFactory = new NativeFSLockFactory();
-    }
-
+    super(lockFactory);
     Files.createDirectories(path); // create directory, if it doesnt exist
     directory = path.toRealPath();
-
-    setLockFactory(lockFactory);
   }
 
   /** Creates an FSDirectory instance, trying to pick the

@@ -157,7 +151,7 @@ public abstract class FSDirectory extends BaseDirectory {
    *
    * <p>See <a href="#subclasses">above</a> */
   public static FSDirectory open(Path path) throws IOException {
-    return open(path, null);
+    return open(path, FSLockFactory.getDefault());
   }
 
   /** Just like {@link #open(Path)}, but allows you to

@@ -172,26 +166,6 @@
     }
   }
 
-  @Override
-  public void setLockFactory(LockFactory lockFactory) throws IOException {
-    super.setLockFactory(lockFactory);
-
-    // for filesystem based LockFactory, delete the lockPrefix, if the locks are placed
-    // in index dir. If no index dir is given, set ourselves
-    if (lockFactory instanceof FSLockFactory) {
-      final FSLockFactory lf = (FSLockFactory) lockFactory;
-      final Path dir = lf.getLockDir();
-      // if the lock factory has no lockDir set, use the this directory as lockDir
-      if (dir == null) {
-        lf.setLockDir(directory);
-        lf.setLockPrefix(null);
-      } else if (dir.toRealPath().equals(directory)) {
-        lf.setLockPrefix(null);
-      }
-    }
-
-  }
-
   /** Lists all files (not subdirectories) in the
    * directory.
    *

@@ -280,19 +254,6 @@
     IOUtils.fsync(directory, true);
   }
 
-  @Override
-  public String getLockID() {
-    ensureOpen();
-    String dirName = directory.toString(); // name to be hashed
-
-    int digest = 0;
-    for(int charIDX=0;charIDX<dirName.length();charIDX++) {
-      final char ch = dirName.charAt(charIDX);
-      digest = 31 * digest + ch;
-    }
-    return "lucene-" + Integer.toHexString(digest);
-  }
-
   /** Closes the store to future operations. */
   @Override
   public synchronized void close() {

@@ -308,7 +269,7 @@
   /** For debug output. */
   @Override
   public String toString() {
-    return this.getClass().getSimpleName() + "@" + directory + " lockFactory=" + getLockFactory();
+    return this.getClass().getSimpleName() + "@" + directory + " lockFactory=" + lockFactory;
   }
 
   final class FSIndexOutput extends OutputStreamIndexOutput {

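A short usage sketch of the changed open() defaults (paths are illustrative):

    import java.nio.file.Paths;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.SimpleFSLockFactory;

    public class OpenSketch {
      public static void main(String[] args) throws Exception {
        // open(Path) now defaults to FSLockFactory.getDefault(), i.e. NativeFSLockFactory.INSTANCE.
        try (FSDirectory dir = FSDirectory.open(Paths.get("/tmp/index"))) {   // hypothetical path
          System.out.println(dir);  // toString() reports the lock factory in use
        }
        // The factory can only be chosen at construction time; setLockFactory() is gone.
        try (FSDirectory dir = FSDirectory.open(Paths.get("/tmp/index"),      // hypothetical path
                                                SimpleFSLockFactory.INSTANCE)) {
          System.out.println(dir);
        }
      }
    }
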
@@ -17,47 +17,29 @@ package org.apache.lucene.store;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
 /**
  * Base class for file system based locking implementation.
+ * This class is explicitly checking that the passed {@link Directory}
+ * is an {@link FSDirectory}.
  */
 
 public abstract class FSLockFactory extends LockFactory {
 
-  /**
-   * Directory for the lock files.
-   */
-  protected Path lockDir = null;
-
-  /**
-   * Set the lock directory. This method can be only called
-   * once to initialize the lock directory. It is used by {@link FSDirectory}
-   * to set the lock directory to itself.
-   * Subclasses can also use this method to set the directory
-   * in the constructor.
-   */
-  protected final void setLockDir(Path lockDir) throws IOException {
-    if (this.lockDir != null)
-      throw new IllegalStateException("You can set the lock directory for this factory only once.");
-    if (lockDir != null) {
-      Files.createDirectories(lockDir);
-    }
-    this.lockDir = lockDir;
-  }
-
-  /**
-   * Retrieve the lock directory.
+  /** Returns the default locking implementation for this platform.
+   * This method currently returns always {@link NativeFSLockFactory}.
    */
-  public Path getLockDir() {
-    return lockDir;
+  public static final FSLockFactory getDefault() {
+    return NativeFSLockFactory.INSTANCE;
   }
 
   @Override
-  public String toString() {
-    return this.getClass().getSimpleName() + "@" + lockDir;
+  public final Lock makeLock(Directory dir, String lockName) {
+    if (!(dir instanceof FSDirectory)) {
+      throw new UnsupportedOperationException(getClass().getSimpleName() + " can only be used with FSDirectory subclasses, got: " + dir);
+    }
+    return makeFSLock((FSDirectory) dir, lockName);
   }
+
+  /** Implement this method to create a lock for a FSDirectory instance. */
+  protected abstract Lock makeFSLock(FSDirectory dir, String lockName);
 
 }

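Because the FSDirectory check now lives in the base class, a custom file-system factory only has to implement makeFSLock. A hypothetical delegating factory, assuming only the API shown above (all names illustrative):

    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.FSLockFactory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.SimpleFSLockFactory;

    // Hypothetical factory that logs lock requests and then delegates to SimpleFSLockFactory.
    public final class LoggingFSLockFactory extends FSLockFactory {

      public static final LoggingFSLockFactory INSTANCE = new LoggingFSLockFactory();

      private LoggingFSLockFactory() {}  // singleton style, matching the built-in factories

      @Override
      protected Lock makeFSLock(FSDirectory dir, String lockName) {
        // dir is guaranteed to be an FSDirectory; FSLockFactory.makeLock() already rejected anything else.
        System.out.println("lock requested in " + dir.getDirectory() + ": " + lockName);
        return SimpleFSLockFactory.INSTANCE.makeLock(dir, lockName);
      }
    }
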
@@ -20,13 +20,14 @@ package org.apache.lucene.store;
 import java.io.IOException;
 import java.nio.file.AtomicMoveNotSupportedException;
 import java.nio.file.NoSuchFileException;
 
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
 import java.util.HashSet;
 
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Expert: A Directory instance that switches files between

@@ -38,10 +39,15 @@ import java.util.HashSet;
  * to this class, and must allow multiple threads to call
  * contains at once.</p>
  *
+ * <p>Locks with a name having the specified extensions are
+ * delegated to the primary directory; others are delegated
+ * to the secondary directory. Ideally, both Directory
+ * instances should use the same lock factory.</p>
+ *
  * @lucene.experimental
  */
 
-public class FileSwitchDirectory extends BaseDirectory {
+public class FileSwitchDirectory extends Directory {
   private final Directory secondaryDir;
   private final Directory primaryDir;
   private final Set<String> primaryExtensions;

@@ -52,7 +58,6 @@ public class FileSwitchDirectory extends BaseDirectory {
     this.primaryDir = primaryDir;
     this.secondaryDir = secondaryDir;
     this.doClose = doClose;
-    this.lockFactory = primaryDir.getLockFactory();
   }
 
   /** Return the primary directory */

@@ -65,14 +70,15 @@ public class FileSwitchDirectory extends BaseDirectory {
     return secondaryDir;
   }
 
+  @Override
+  public Lock makeLock(String name) {
+    return getDirectory(name).makeLock(name);
+  }
+
   @Override
   public void close() throws IOException {
     if (doClose) {
-      try {
-        secondaryDir.close();
-      } finally {
-        primaryDir.close();
-      }
+      IOUtils.close(primaryDir, secondaryDir);
       doClose = false;
     }
   }

@@ -85,31 +85,11 @@ public class FilterDirectory extends Directory {
     return in.makeLock(name);
   }
 
-  @Override
-  public void clearLock(String name) throws IOException {
-    in.clearLock(name);
-  }
-
   @Override
   public void close() throws IOException {
     in.close();
   }
 
-  @Override
-  public void setLockFactory(LockFactory lockFactory) throws IOException {
-    in.setLockFactory(lockFactory);
-  }
-
-  @Override
-  public String getLockID() {
-    return in.getLockID();
-  }
-
-  @Override
-  public LockFactory getLockFactory() {
-    return in.getLockFactory();
-  }
-
   @Override
   public String toString() {
     return getClass().getSimpleName() + "(" + in.toString() + ")";

@@ -17,7 +17,6 @@ package org.apache.lucene.store;
  * limitations under the License.
  */
 
-import java.io.IOException;
 
 /**
  * <p>Base class for Locking implementation. {@link Directory} uses

@@ -46,40 +45,10 @@ import java.io.IOException;
 
 public abstract class LockFactory {
 
-  protected String lockPrefix = null;
-
-  /**
-   * Set the prefix in use for all locks created in this
-   * LockFactory. This is normally called once, when a
-   * Directory gets this LockFactory instance. However, you
-   * can also call this (after this instance is assigned to
-   * a Directory) to override the prefix in use. This
-   * is helpful if you're running Lucene on machines that
-   * have different mount points for the same shared
-   * directory.
-   */
-  public void setLockPrefix(String lockPrefix) {
-    this.lockPrefix = lockPrefix;
-  }
-
-  /**
-   * Get the prefix in use for all locks created in this LockFactory.
-   */
-  public String getLockPrefix() {
-    return this.lockPrefix;
-  }
-
   /**
    * Return a new Lock instance identified by lockName.
    * @param lockName name of the lock to be created.
    */
-  public abstract Lock makeLock(String lockName);
+  public abstract Lock makeLock(Directory dir, String lockName);
 
-  /**
-   * Attempt to clear (forcefully unlock and remove) the
-   * specified lock. Only call this at a time when you are
-   * certain this lock is no longer in use.
-   * @param lockName name of the lock to be cleared.
-   */
-  abstract public void clearLock(String lockName) throws IOException;
 }

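With the prefix machinery and clearLock() gone, a LockFactory is reduced to a single factory method that receives the Directory. A hypothetical in-process implementation (names illustrative):

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockFactory;
    import org.apache.lucene.store.SingleInstanceLockFactory;

    // Hypothetical factory that scopes lock names per Directory instance before
    // delegating to an in-process SingleInstanceLockFactory.
    public final class PerDirectoryLockFactory extends LockFactory {

      private final SingleInstanceLockFactory delegate = new SingleInstanceLockFactory();

      @Override
      public Lock makeLock(Directory dir, String lockName) {
        // The Directory is handed in per call, so any per-directory state can be
        // derived here instead of being configured on the factory up front.
        return delegate.makeLock(dir, System.identityHashCode(dir) + "-" + lockName);
      }
    }
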
@@ -45,8 +45,8 @@ public class LockStressTest {
     "  myID = int from 0 .. 255 (should be unique for test process)\n" +
     "  verifierHost = hostname that LockVerifyServer is listening on\n" +
     "  verifierPort = port that LockVerifyServer is listening on\n" +
-    "  lockFactoryClassName = primary LockFactory class that we will use\n" +
-    "  lockDirName = path to the lock directory (only set for Simple/NativeFSLockFactory\n" +
+    "  lockFactoryClassName = primary FSLockFactory class that we will use\n" +
+    "  lockDirName = path to the lock directory\n" +
     "  sleepTimeMS = milliseconds to pause betweeen each lock obtain/release\n" +
     "  count = number of locking tries\n" +
     "\n" +

@@ -69,11 +69,13 @@ public class LockStressTest {
     final String verifierHost = args[arg++];
     final int verifierPort = Integer.parseInt(args[arg++]);
     final String lockFactoryClassName = args[arg++];
-    final String lockDirName = args[arg++];
+    final Path lockDirPath = Paths.get(args[arg++]);
     final int sleepTimeMS = Integer.parseInt(args[arg++]);
     final int count = Integer.parseInt(args[arg++]);
 
-    final LockFactory lockFactory = getNewLockFactory(lockFactoryClassName, lockDirName);
+    final LockFactory lockFactory = getNewLockFactory(lockFactoryClassName);
+    // we test the lock factory directly, so we don't need it on the directory itsself (the directory is just for testing)
+    final FSDirectory lockDir = new SimpleFSDirectory(lockDirPath, NoLockFactory.INSTANCE);
     final InetSocketAddress addr = new InetSocketAddress(verifierHost, verifierPort);
     System.out.println("Connecting to server " + addr +
         " and registering as client " + myID + "...");

@@ -86,7 +88,7 @@ public class LockStressTest {
       out.write(myID);
       out.flush();
       LockFactory verifyLF = new VerifyingLockFactory(lockFactory, in, out);
-      Lock l = verifyLF.makeLock("test.lock");
+      Lock l = verifyLF.makeLock(lockDir, "test.lock");
       final Random rnd = new Random();
 
       // wait for starting gun

@@ -103,9 +105,9 @@ public class LockStressTest {
         if (obtained) {
-          if (rnd.nextInt(10) == 0) {
+          if (rnd.nextBoolean()) {
-            verifyLF = new VerifyingLockFactory(getNewLockFactory(lockFactoryClassName, lockDirName), in, out);
+            verifyLF = new VerifyingLockFactory(getNewLockFactory(lockFactoryClassName), in, out);
           }
-          final Lock secondLock = verifyLF.makeLock("test.lock");
+          final Lock secondLock = verifyLF.makeLock(lockDir, "test.lock");
           if (secondLock.obtain()) {
             throw new IOException("Double Obtain");
           }

@@ -126,20 +128,21 @@ public class LockStressTest {
   }
 
 
-  private static LockFactory getNewLockFactory(String lockFactoryClassName, String lockDirName) throws IOException {
-    LockFactory lockFactory;
+  private static FSLockFactory getNewLockFactory(String lockFactoryClassName) throws IOException {
+    // try to get static INSTANCE field of class
     try {
-      lockFactory = Class.forName(lockFactoryClassName).asSubclass(LockFactory.class).newInstance();
+      return (FSLockFactory) Class.forName(lockFactoryClassName).getField("INSTANCE").get(null);
+    } catch (ReflectiveOperationException e) {
+      // fall-through
+    }
+
+    // try to create a new instance
+    try {
+      return Class.forName(lockFactoryClassName).asSubclass(FSLockFactory.class).newInstance();
     } catch (IllegalAccessException | InstantiationException | ClassCastException | ClassNotFoundException e) {
-      throw new IOException("Cannot instantiate lock factory " + lockFactoryClassName);
+      // fall-through
     }
 
-    Path lockDir = Paths.get(lockDirName);
-
-    if (lockFactory instanceof FSLockFactory) {
-      ((FSLockFactory) lockFactory).setLockDir(lockDir);
-    }
-    lockFactory.setLockPrefix("test");
-    return lockFactory;
+    throw new IOException("Cannot get lock factory singleton of " + lockFactoryClassName);
   }
 }

@@ -24,7 +24,6 @@ import java.nio.channels.FileChannel;
 import java.nio.channels.FileChannel.MapMode;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
-
 import java.security.AccessController;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedActionException;

@@ -85,27 +84,37 @@ public class MMapDirectory extends FSDirectory {
    * Default max chunk size.
    * @see #MMapDirectory(Path, LockFactory, int)
    */
-  public static final int DEFAULT_MAX_BUFF = Constants.JRE_IS_64BIT ? (1 << 30) : (1 << 28);
+  public static final int DEFAULT_MAX_CHUNK_SIZE = Constants.JRE_IS_64BIT ? (1 << 30) : (1 << 28);
   final int chunkSizePower;
 
   /** Create a new MMapDirectory for the named location.
    *
    * @param path the path of the directory
-   * @param lockFactory the lock factory to use, or null for the default
-   * ({@link NativeFSLockFactory});
+   * @param lockFactory the lock factory to use
    * @throws IOException if there is a low-level I/O error
    */
   public MMapDirectory(Path path, LockFactory lockFactory) throws IOException {
-    this(path, lockFactory, DEFAULT_MAX_BUFF);
+    this(path, lockFactory, DEFAULT_MAX_CHUNK_SIZE);
   }
 
-  /** Create a new MMapDirectory for the named location and {@link NativeFSLockFactory}.
-   *
-   * @param path the path of the directory
-   * @throws IOException if there is a low-level I/O error
-   */
+  /** Create a new MMapDirectory for the named location and {@link FSLockFactory#getDefault()}.
+   *
+   * @param path the path of the directory
+   * @throws IOException if there is a low-level I/O error
+   */
   public MMapDirectory(Path path) throws IOException {
-    this(path, null);
+    this(path, FSLockFactory.getDefault());
   }
 
+  /** Create a new MMapDirectory for the named location and {@link FSLockFactory#getDefault()}.
+   *
+   * @param path the path of the directory
+   * @param maxChunkSize maximum chunk size (default is 1 GiBytes for
+   * 64 bit JVMs and 256 MiBytes for 32 bit JVMs) used for memory mapping.
+   * @throws IOException if there is a low-level I/O error
+   */
+  public MMapDirectory(Path path, int maxChunkSize) throws IOException {
+    this(path, FSLockFactory.getDefault(), maxChunkSize);
+  }
+
   /**

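As a usage note, the renamed constant and the new (Path, int) constructor make it possible to tune the mapping chunk size without spelling out a lock factory; a small sketch (path and chunk size illustrative):

    import java.nio.file.Paths;
    import org.apache.lucene.store.MMapDirectory;

    public class MMapSketch {
      public static void main(String[] args) throws Exception {
        // Both MMapDirectory(Path) and MMapDirectory(Path, int) end up using FSLockFactory.getDefault().
        try (MMapDirectory dir = new MMapDirectory(Paths.get("/tmp/index"),   // hypothetical path
                                                   1 << 28 /* 256 MiB chunks */)) {
          System.out.println(dir + ", default chunk=" + MMapDirectory.DEFAULT_MAX_CHUNK_SIZE);
        }
      }
    }
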
@@ -56,21 +56,20 @@ public class NIOFSDirectory extends FSDirectory {
   /** Create a new NIOFSDirectory for the named location.
    *
    * @param path the path of the directory
-   * @param lockFactory the lock factory to use, or null for the default
-   * ({@link NativeFSLockFactory});
+   * @param lockFactory the lock factory to use
    * @throws IOException if there is a low-level I/O error
    */
   public NIOFSDirectory(Path path, LockFactory lockFactory) throws IOException {
     super(path, lockFactory);
   }
 
-  /** Create a new NIOFSDirectory for the named location and {@link NativeFSLockFactory}.
+  /** Create a new NIOFSDirectory for the named location and {@link FSLockFactory#getDefault()}.
    *
    * @param path the path of the directory
    * @throws IOException if there is a low-level I/O error
    */
   public NIOFSDirectory(Path path) throws IOException {
-    super(path, null);
+    this(path, FSLockFactory.getDefault());
   }
 
   /** Creates an IndexInput for the file with the given name. */

@@ -23,7 +23,6 @@ import java.nio.channels.OverlappingFileLockException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
-import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;

@@ -66,167 +65,149 @@ import org.apache.lucene.util.IOUtils;
  * not working properly in your environment, you can easily
  * test it by using {@link VerifyingLockFactory}, {@link
  * LockVerifyServer} and {@link LockStressTest}.</p>
  *
+ * <p>This is a singleton, you have to use {@link #INSTANCE}.
+ *
  * @see LockFactory
  */
 
-public class NativeFSLockFactory extends FSLockFactory {
+public final class NativeFSLockFactory extends FSLockFactory {
 
   /**
-   * Create a NativeFSLockFactory instance, with null (unset)
-   * lock directory. When you pass this factory to a {@link FSDirectory}
-   * subclass, the lock directory is automatically set to the
-   * directory itself. Be sure to create one instance for each directory
-   * your create!
+   * Singleton instance
    */
-  public NativeFSLockFactory() throws IOException {
-    this((Path) null);
-  }
+  public static final NativeFSLockFactory INSTANCE = new NativeFSLockFactory();
 
-  /**
-   * Create a NativeFSLockFactory instance, storing lock
-   * files into the specified lockDir:
-   *
-   * @param lockDir where lock files are created.
-   */
-  public NativeFSLockFactory(Path lockDir) throws IOException {
-    setLockDir(lockDir);
-  }
+  private NativeFSLockFactory() {}
 
   @Override
-  public synchronized Lock makeLock(String lockName) {
-    if (lockPrefix != null)
-      lockName = lockPrefix + "-" + lockName;
-    return new NativeFSLock(lockDir, lockName);
-  }
-
-  @Override
-  public void clearLock(String lockName) throws IOException {
-    makeLock(lockName).close();
+  protected Lock makeFSLock(FSDirectory dir, String lockName) {
+    return new NativeFSLock(dir.getDirectory(), lockName);
   }
-}
 
-class NativeFSLock extends Lock {
+  static final class NativeFSLock extends Lock {
 
    private FileChannel channel;
    private FileLock lock;
    private Path path;
    private Path lockDir;
    private static final Set<String> LOCK_HELD = Collections.synchronizedSet(new HashSet<String>());
 
    public NativeFSLock(Path lockDir, String lockFileName) {
      this.lockDir = lockDir;
      path = lockDir.resolve(lockFileName);
    }
 
    @Override
    public synchronized boolean obtain() throws IOException {
 
      if (lock != null) {
        // Our instance is already locked:
        return false;
      }
 
      // Ensure that lockDir exists and is a directory.
      Files.createDirectories(lockDir);
      try {
        Files.createFile(path);
      } catch (IOException ignore) {
        // we must create the file to have a truly canonical path.
        // if its already created, we don't care. if it cant be created, it will fail below.
      }
      final Path canonicalPath = path.toRealPath();
      // Make sure nobody else in-process has this lock held
      // already, and, mark it held if not:
      // This is a pretty crazy workaround for some documented
      // but yet awkward JVM behavior:
      //
      //   On some systems, closing a channel releases all locks held by the Java virtual machine on the underlying file
      //   regardless of whether the locks were acquired via that channel or via another channel open on the same file.
      //   It is strongly recommended that, within a program, a unique channel be used to acquire all locks on any given
      //   file.
      //
      // This essentially means if we close "A" channel for a given file all locks might be released... the odd part
      // is that we can't re-obtain the lock in the same JVM but from a different process if that happens. Nevertheless
      // this is super trappy. See LUCENE-5738
      boolean obtained = false;
      if (LOCK_HELD.add(canonicalPath.toString())) {
        try {
          channel = FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
          try {
            lock = channel.tryLock();
            obtained = lock != null;
          } catch (IOException | OverlappingFileLockException e) {
            // At least on OS X, we will sometimes get an
            // intermittent "Permission Denied" IOException,
            // which seems to simply mean "you failed to get
            // the lock". But other IOExceptions could be
            // "permanent" (eg, locking is not supported via
            // the filesystem). So, we record the failure
            // reason here; the timeout obtain (usually the
            // one calling us) will use this as "root cause"
            // if it fails to get the lock.
            failureReason = e;
          }
        } finally {
          if (obtained == false) { // not successful - clear up and move out
            clearLockHeld(path);
            final FileChannel toClose = channel;
            channel = null;
            IOUtils.closeWhileHandlingException(toClose);
          }
        }
      }
      return obtained;
    }
 
    @Override
    public synchronized void close() throws IOException {
      try {
        if (lock != null) {
          try {
            lock.release();
            lock = null;
          } finally {
            clearLockHeld(path);
          }
        }
      } finally {
        IOUtils.close(channel);
        channel = null;
      }
    }
 
    private static final void clearLockHeld(Path path) throws IOException {
      path = path.toRealPath();
      boolean remove = LOCK_HELD.remove(path.toString());
      assert remove : "Lock was cleared but never marked as held";
    }
 
    @Override
    public synchronized boolean isLocked() {
      // The test for is isLocked is not directly possible with native file locks:
 
      // First a shortcut, if a lock reference in this instance is available
      if (lock != null) return true;
 
      // Look if lock file is definitely not present; if not, there can definitely be no lock!
      if (Files.notExists(path)) return false;
 
      // Try to obtain and release (if was locked) the lock
      try {
        boolean obtained = obtain();
        if (obtained) close();
        return !obtained;
      } catch (IOException ioe) {
        return false;
      }
    }
 
    @Override
    public String toString() {
      return "NativeFSLock@" + path;
    }
  }
+}

@@ -21,50 +21,45 @@ import java.io.IOException;
 
 /**
  * Use this {@link LockFactory} to disable locking entirely.
- * Only one instance of this lock is created. You should call {@link
- * #getNoLockFactory()} to get the instance.
+ * This is a singleton, you have to use {@link #INSTANCE}.
  *
  * @see LockFactory
  */
 
-public class NoLockFactory extends LockFactory {
+public final class NoLockFactory extends LockFactory {
 
-  // Single instance returned whenever makeLock is called.
-  private static NoLock singletonLock = new NoLock();
-  private static NoLockFactory singleton = new NoLockFactory();
+  /** The singleton */
+  public static final NoLockFactory INSTANCE = new NoLockFactory();
+
+  // visible for AssertingLock!
+  static final NoLock SINGLETON_LOCK = new NoLock();
 
   private NoLockFactory() {}
 
-  public static NoLockFactory getNoLockFactory() {
-    return singleton;
-  }
-
   @Override
-  public Lock makeLock(String lockName) {
-    return singletonLock;
+  public Lock makeLock(Directory dir, String lockName) {
+    return SINGLETON_LOCK;
   }
 
-  @Override
-  public void clearLock(String lockName) {}
-}
-
-class NoLock extends Lock {
-  @Override
-  public boolean obtain() throws IOException {
-    return true;
-  }
-
-  @Override
-  public void close() {
-  }
-
-  @Override
-  public boolean isLocked() {
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return "NoLock";
-  }
-}
+  private static class NoLock extends Lock {
+    @Override
+    public boolean obtain() throws IOException {
+      return true;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public boolean isLocked() {
+      return false;
+    }
+
+    @Override
+    public String toString() {
+      return "NoLock";
+    }
+  }
+}

@@ -33,8 +33,7 @@ import org.apache.lucene.util.Accountables;
 
 /**
  * A memory-resident {@link Directory} implementation. Locking
- * implementation is by default the {@link SingleInstanceLockFactory}
- * but can be changed with {@link #setLockFactory}.
+ * implementation is by default the {@link SingleInstanceLockFactory}.
  *
  * <p><b>Warning:</b> This class is not intended to work with huge
  * indexes. Everything beyond several hundred megabytes will waste

@@ -52,17 +51,14 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
   protected final Map<String,RAMFile> fileMap = new ConcurrentHashMap<>();
   protected final AtomicLong sizeInBytes = new AtomicLong();
 
-  // *****
-  // Lock acquisition sequence: RAMDirectory, then RAMFile
-  // *****
-
   /** Constructs an empty {@link Directory}. */
   public RAMDirectory() {
-    try {
-      setLockFactory(new SingleInstanceLockFactory());
-    } catch (IOException e) {
-      // Cannot happen
-    }
+    this(new SingleInstanceLockFactory());
   }
 
+  /** Constructs an empty {@link Directory} with the given {@link LockFactory}. */
+  public RAMDirectory(LockFactory lockFactory) {
+    super(lockFactory);
+  }
+
   /**

@@ -105,11 +101,6 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
     }
   }
 
-  @Override
-  public String getLockID() {
-    return "lucene-" + Integer.toHexString(hashCode());
-  }
-
   @Override
   public final String[] listAll() {
     ensureOpen();

@@ -156,11 +147,6 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
     return Accountables.namedAccountables("file", fileMap);
   }
 
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "(id=" + getLockID() + ")";
-  }
-
   /** Removes an existing file in the directory.
    * @throws IOException if the file does not exist
    */

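In practice the two RAMDirectory constructors now behave as in the following sketch:

    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.store.SingleInstanceLockFactory;

    public class RamSketch {
      public static void main(String[] args) throws Exception {
        // The no-arg constructor chains to this(new SingleInstanceLockFactory()),
        // so these two directories are locked the same way.
        try (RAMDirectory a = new RAMDirectory();
             RAMDirectory b = new RAMDirectory(new SingleInstanceLockFactory())) {
          System.out.println(a + " / " + b);
        }
      }
    }
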
@@ -37,21 +37,20 @@ public class SimpleFSDirectory extends FSDirectory {
   /** Create a new SimpleFSDirectory for the named location.
    *
    * @param path the path of the directory
-   * @param lockFactory the lock factory to use, or null for the default
-   * ({@link NativeFSLockFactory});
+   * @param lockFactory the lock factory to use
    * @throws IOException if there is a low-level I/O error
    */
   public SimpleFSDirectory(Path path, LockFactory lockFactory) throws IOException {
     super(path, lockFactory);
   }
 
-  /** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
+  /** Create a new SimpleFSDirectory for the named location and {@link FSLockFactory#getDefault()}.
    *
    * @param path the path of the directory
    * @throws IOException if there is a low-level I/O error
    */
   public SimpleFSDirectory(Path path) throws IOException {
-    super(path, null);
+    this(path, FSLockFactory.getDefault());
   }
 
   /** Creates an IndexInput for the file with the given name. */

@@ -56,91 +56,71 @@ import java.nio.file.Path;
  * not working properly in your environment, you can easily
  * test it by using {@link VerifyingLockFactory}, {@link
  * LockVerifyServer} and {@link LockStressTest}.</p>
  *
+ * <p>This is a singleton, you have to use {@link #INSTANCE}.
+ *
  * @see LockFactory
  */
 
-public class SimpleFSLockFactory extends FSLockFactory {
+public final class SimpleFSLockFactory extends FSLockFactory {
 
   /**
-   * Create a SimpleFSLockFactory instance, with null (unset)
-   * lock directory. When you pass this factory to a {@link FSDirectory}
-   * subclass, the lock directory is automatically set to the
-   * directory itself. Be sure to create one instance for each directory
-   * your create!
+   * Singleton instance
    */
-  public SimpleFSLockFactory() throws IOException {
-    this((Path) null);
-  }
-
-  /**
-   * Instantiate using the provided directory (as a Path instance).
-   * @param lockDir where lock files should be created.
-   */
-  public SimpleFSLockFactory(Path lockDir) throws IOException {
-    setLockDir(lockDir);
-  }
+  public static final SimpleFSLockFactory INSTANCE = new SimpleFSLockFactory();
+
+  private SimpleFSLockFactory() {}
 
   @Override
-  public Lock makeLock(String lockName) {
-    if (lockPrefix != null) {
-      lockName = lockPrefix + "-" + lockName;
-    }
-    return new SimpleFSLock(lockDir, lockName);
-  }
-
-  @Override
-  public void clearLock(String lockName) throws IOException {
-    if (lockPrefix != null) {
-      lockName = lockPrefix + "-" + lockName;
-    }
-    Files.deleteIfExists(lockDir.resolve(lockName));
+  protected Lock makeFSLock(FSDirectory dir, String lockName) {
+    return new SimpleFSLock(dir.getDirectory(), lockName);
   }
-}
 
-class SimpleFSLock extends Lock {
+  static class SimpleFSLock extends Lock {
 
    Path lockFile;
    Path lockDir;
 
    public SimpleFSLock(Path lockDir, String lockFileName) {
      this.lockDir = lockDir;
      lockFile = lockDir.resolve(lockFileName);
    }
 
    @Override
    public boolean obtain() throws IOException {
      try {
        Files.createDirectories(lockDir);
        Files.createFile(lockFile);
        return true;
      } catch (IOException ioe) {
        // On Windows, on concurrent createNewFile, the 2nd process gets "access denied".
        // In that case, the lock was not aquired successfully, so return false.
        // We record the failure reason here; the obtain with timeout (usually the
        // one calling us) will use this as "root cause" if it fails to get the lock.
        failureReason = ioe;
        return false;
      }
    }
 
    @Override
    public void close() throws LockReleaseFailedException {
      // TODO: wierd that clearLock() throws the raw IOException...
      try {
        Files.deleteIfExists(lockFile);
      } catch (Throwable cause) {
        throw new LockReleaseFailedException("failed to delete " + lockFile, cause);
      }
    }
 
    @Override
    public boolean isLocked() {
      return Files.exists(lockFile);
    }
 
    @Override
    public String toString() {
      return "SimpleFSLock@" + lockFile;
    }
  }
+}

@@ -31,61 +31,49 @@ import java.util.HashSet;
  * @see LockFactory
  */
 
-public class SingleInstanceLockFactory extends LockFactory {
+public final class SingleInstanceLockFactory extends LockFactory {
 
-  private HashSet<String> locks = new HashSet<>();
+  private final HashSet<String> locks = new HashSet<>();
 
   @Override
-  public Lock makeLock(String lockName) {
-    // We do not use the LockPrefix at all, because the private
-    // HashSet instance effectively scopes the locking to this
-    // single Directory instance.
+  public Lock makeLock(Directory dir, String lockName) {
     return new SingleInstanceLock(locks, lockName);
   }
 
-  @Override
-  public void clearLock(String lockName) throws IOException {
-    synchronized(locks) {
-      if (locks.contains(lockName)) {
-        locks.remove(lockName);
-      }
-    }
-  }
-}
-
-class SingleInstanceLock extends Lock {
-
-  String lockName;
-  private HashSet<String> locks;
+  private static class SingleInstanceLock extends Lock {
+
+    private final String lockName;
+    private final HashSet<String> locks;
 
    public SingleInstanceLock(HashSet<String> locks, String lockName) {
      this.locks = locks;
      this.lockName = lockName;
    }
 
    @Override
    public boolean obtain() throws IOException {
      synchronized(locks) {
        return locks.add(lockName);
      }
    }
 
    @Override
    public void close() {
      synchronized(locks) {
        locks.remove(lockName);
      }
    }
 
    @Override
    public boolean isLocked() {
      synchronized(locks) {
        return locks.contains(lockName);
      }
    }
 
    @Override
    public String toString() {
      return super.toString() + ": " + lockName;
    }
  }
+}

@@ -35,7 +35,7 @@ import java.io.OutputStream;
  * @see LockStressTest
  */
 
-public class VerifyingLockFactory extends LockFactory {
+public final class VerifyingLockFactory extends LockFactory {
 
   final LockFactory lf;
   final InputStream in;

@@ -94,13 +94,7 @@ public class VerifyingLockFactory extends LockFactory {
   }
 
   @Override
-  public synchronized Lock makeLock(String lockName) {
-    return new CheckedLock(lf.makeLock(lockName));
-  }
-
-  @Override
-  public synchronized void clearLock(String lockName)
-      throws IOException {
-    lf.clearLock(lockName);
+  public Lock makeLock(Directory dir, String lockName) {
+    return new CheckedLock(lf.makeLock(dir, lockName));
   }
 }

@@ -23,6 +23,8 @@ import java.nio.file.Path;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FSLockFactory;
+import org.apache.lucene.store.LockFactory;
 
 /**
  * Class containing some useful methods used by command line tools

@@ -35,15 +37,26 @@ public final class CommandLineUtil {
   }
 
   /**
-   * Creates a specific FSDirectory instance starting from its class name
+   * Creates a specific FSDirectory instance starting from its class name, using the default lock factory
    * @param clazzName The name of the FSDirectory class to load
    * @param path The path to be used as parameter constructor
    * @return the new FSDirectory instance
    */
   public static FSDirectory newFSDirectory(String clazzName, Path path) {
+    return newFSDirectory(clazzName, path, FSLockFactory.getDefault());
+  }
+
+  /**
+   * Creates a specific FSDirectory instance starting from its class name
+   * @param clazzName The name of the FSDirectory class to load
+   * @param path The path to be used as parameter constructor
+   * @param lf The lock factory to be used
+   * @return the new FSDirectory instance
+   */
+  public static FSDirectory newFSDirectory(String clazzName, Path path, LockFactory lf) {
     try {
       final Class<? extends FSDirectory> clazz = loadFSDirectoryClass(clazzName);
-      return newFSDirectory(clazz, path);
+      return newFSDirectory(clazz, path, lf);
     } catch (ClassNotFoundException e) {
       throw new IllegalArgumentException(FSDirectory.class.getSimpleName()
         + " implementation not found: " + clazzName, e);

@@ -103,10 +116,26 @@
    * @throws InvocationTargetException If the constructor throws an exception
    */
   public static FSDirectory newFSDirectory(Class<? extends FSDirectory> clazz, Path path)
-      throws NoSuchMethodException, InstantiationException, IllegalAccessException, InvocationTargetException {
+      throws ReflectiveOperationException {
+    return newFSDirectory(clazz, path, FSLockFactory.getDefault());
+  }
+
+  /**
+   * Creates a new specific FSDirectory instance
+   * @param clazz The class of the object to be created
+   * @param path The file to be used as parameter constructor
+   * @param lf The lock factory to be used
+   * @return The new FSDirectory instance
+   * @throws NoSuchMethodException If the Directory does not have a constructor that takes <code>Path</code>.
+   * @throws InstantiationException If the class is abstract or an interface.
+   * @throws IllegalAccessException If the constructor does not have public visibility.
+   * @throws InvocationTargetException If the constructor throws an exception
+   */
+  public static FSDirectory newFSDirectory(Class<? extends FSDirectory> clazz, Path path, LockFactory lf)
+      throws ReflectiveOperationException {
     // Assuming every FSDirectory has a ctor(Path):
-    Constructor<? extends FSDirectory> ctor = clazz.getConstructor(Path.class);
-    return ctor.newInstance(path);
+    Constructor<? extends FSDirectory> ctor = clazz.getConstructor(Path.class, LockFactory.class);
+    return ctor.newInstance(path, lf);
   }
 
 }

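For callers, the two-argument overloads keep working against the default factory, while the new three-argument forms take the factory explicitly; a hedged sketch (implementation class name and path are illustrative):

    import java.nio.file.Paths;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.NoLockFactory;
    import org.apache.lucene.util.CommandLineUtil;

    public class CliDirSketch {
      public static void main(String[] args) throws Exception {
        FSDirectory dir = CommandLineUtil.newFSDirectory(
            "org.apache.lucene.store.NIOFSDirectory",   // hypothetical choice of implementation
            Paths.get("/tmp/index"),                    // hypothetical path
            NoLockFactory.INSTANCE);
        System.out.println(dir);
      }
    }
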
@@ -31,12 +31,10 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestCrash extends LuceneTestCase {
 
   private IndexWriter initIndex(Random random, boolean initialCommit) throws IOException {
-    return initIndex(random, newMockDirectory(random), initialCommit, true);
+    return initIndex(random, newMockDirectory(random, NoLockFactory.INSTANCE), initialCommit, true);
   }
 
   private IndexWriter initIndex(Random random, MockDirectoryWrapper dir, boolean initialCommit, boolean commitOnClose) throws IOException {
-    dir.setLockFactory(NoLockFactory.getNoLockFactory());
-
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random))
         .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler()).setCommitOnClose(commitOnClose));
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();

@@ -187,7 +185,7 @@ public class TestCrash extends LuceneTestCase {
 
   public void testCrashAfterCloseNoWait() throws IOException {
     Random random = random();
-    MockDirectoryWrapper dir = newMockDirectory(random);
+    MockDirectoryWrapper dir = newMockDirectory(random, NoLockFactory.INSTANCE);
     IndexWriter writer = initIndex(random, dir, false, false);
 
     try {

@@ -150,7 +150,6 @@ public class TestCrashCausesCorruptIndex extends LuceneTestCase {
 
     public CrashAfterCreateOutput(Directory realDirectory) throws IOException {
       super(realDirectory);
-      setLockFactory(realDirectory.getLockFactory());
     }
 
     public void setCrashAfterCreateOutput(String name) {

@@ -38,6 +38,7 @@ import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSLockFactory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.TrackingDirectoryWrapper;

@@ -111,7 +112,7 @@ public class TestDoc extends LuceneTestCase {
     StringWriter sw = new StringWriter();
     PrintWriter out = new PrintWriter(sw, true);
 
-    Directory directory = newFSDirectory(indexDir, null);
+    Directory directory = newFSDirectory(indexDir);
 
     if (directory instanceof MockDirectoryWrapper) {
       // We create unreferenced files (we don't even write

@@ -155,7 +156,7 @@ public class TestDoc extends LuceneTestCase {
     sw = new StringWriter();
     out = new PrintWriter(sw, true);
 
-    directory = newFSDirectory(indexDir, null);
+    directory = newFSDirectory(indexDir);
 
     if (directory instanceof MockDirectoryWrapper) {
       // We create unreferenced files (we don't even write

@@ -27,15 +27,13 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.BaseDirectory;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -111,55 +109,18 @@ public class TestFieldsReader extends LuceneTestCase {
}


public class FaultyFSDirectory extends BaseDirectory {
Directory fsDir;
public class FaultyFSDirectory extends FilterDirectory {
AtomicBoolean doFail = new AtomicBoolean();

public FaultyFSDirectory(Path dir) {
fsDir = newFSDirectory(dir);
lockFactory = fsDir.getLockFactory();
public FaultyFSDirectory(Directory fsDir) {
super(fsDir);
}

@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
return new FaultyIndexInput(doFail, fsDir.openInput(name, context));
return new FaultyIndexInput(doFail, in.openInput(name, context));
}

@Override
public String[] listAll() throws IOException {
return fsDir.listAll();
}

@Override
public void deleteFile(String name) throws IOException {
fsDir.deleteFile(name);
}

@Override
public long fileLength(String name) throws IOException {
return fsDir.fileLength(name);
}

@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
return fsDir.createOutput(name, context);
}

@Override
public void sync(Collection<String> names) throws IOException {
fsDir.sync(names);
}

@Override
public void renameFile(String source, String dest) throws IOException {
fsDir.renameFile(source, dest);
}

@Override
public void close() throws IOException {
fsDir.close();
}

public void startFailing() {
doFail.set(true);
}
@@ -228,7 +189,8 @@ public class TestFieldsReader extends LuceneTestCase {
Path indexDir = createTempDir("testfieldswriterexceptions");

try {
FaultyFSDirectory dir = new FaultyFSDirectory(indexDir);
Directory fsDir = newFSDirectory(indexDir);
FaultyFSDirectory dir = new FaultyFSDirectory(fsDir);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
.setOpenMode(OpenMode.CREATE);
IndexWriter writer = new IndexWriter(dir, iwc);

@@ -66,14 +66,11 @@ import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
@@ -517,45 +514,6 @@ public class TestIndexWriter extends LuceneTestCase {
dir.close();
}

// Make sure that a Directory implementation that does
// not use LockFactory at all (ie overrides makeLock and
// implements its own private locking) works OK. This
// was raised on java-dev as loss of backwards
// compatibility.
public void testNullLockFactory() throws IOException {

final class MyRAMDirectory extends MockDirectoryWrapper {
private LockFactory myLockFactory;
MyRAMDirectory(Directory delegate) {
super(random(), delegate);
lockFactory = null;
myLockFactory = new SingleInstanceLockFactory();
}
@Override
public Lock makeLock(String name) {
return myLockFactory.makeLock(name);
}
}

Directory dir = new MyRAMDirectory(new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
reader.close();

writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
.setOpenMode(OpenMode.CREATE));
writer.close();
dir.close();
}

public void testFlushWithNoMerging() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(
@@ -1465,7 +1423,7 @@ public class TestIndexWriter extends LuceneTestCase {
// Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
// then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
// when listAll() was called in IndexFileDeleter.
Directory dir = newFSDirectory(createTempDir("emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
Directory dir = newFSDirectory(createTempDir("emptyFSDirNoLock"), NoLockFactory.INSTANCE);
new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))).close();
dir.close();
}
@@ -1536,8 +1494,7 @@ public class TestIndexWriter extends LuceneTestCase {
}

public void testNoSegmentFile() throws IOException {
BaseDirectoryWrapper dir = newDirectory();
dir.setLockFactory(NoLockFactory.getNoLockFactory());
BaseDirectoryWrapper dir = newDirectory(random(), NoLockFactory.INSTANCE);
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
.setMaxBufferedDocs(2));

@@ -1779,11 +1736,10 @@ public class TestIndexWriter extends LuceneTestCase {
}

public void testWhetherDeleteAllDeletesWriteLock() throws Exception {
Directory d = newFSDirectory(createTempDir("TestIndexWriter.testWhetherDeleteAllDeletesWriteLock"));
// Must use SimpleFSLockFactory... NativeFSLockFactory
// somehow "knows" a lock is held against write.lock
// even if you remove that file:
d.setLockFactory(new SimpleFSLockFactory());
Directory d = newFSDirectory(createTempDir("TestIndexWriter.testWhetherDeleteAllDeletesWriteLock"), SimpleFSLockFactory.INSTANCE);
RandomIndexWriter w1 = new RandomIndexWriter(random(), d);
w1.deleteAll();
try {

@@ -20,7 +20,6 @@ package org.apache.lucene.store;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

@@ -31,15 +30,14 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.ArrayUtil;

public class TestBufferedIndexInput extends LuceneTestCase {

@@ -267,18 +265,14 @@ public class TestBufferedIndexInput extends LuceneTestCase {
}
}

private static class MockFSDirectory extends BaseDirectory {
private static class MockFSDirectory extends FilterDirectory {

List<IndexInput> allIndexInputs = new ArrayList<>();

Random rand;

private Directory dir;
final List<IndexInput> allIndexInputs = new ArrayList<>();
final Random rand;

public MockFSDirectory(Path path, Random rand) throws IOException {
super(new SimpleFSDirectory(path));
this.rand = rand;
lockFactory = NoLockFactory.getNoLockFactory();
dir = new SimpleFSDirectory(path, null);
}

public void tweakBufferSizes() {
@@ -296,46 +290,9 @@ public class TestBufferedIndexInput extends LuceneTestCase {
public IndexInput openInput(String name, IOContext context) throws IOException {
// Make random changes to buffer size
//bufferSize = 1+Math.abs(rand.nextInt() % 10);
IndexInput f = dir.openInput(name, context);
IndexInput f = super.openInput(name, context);
allIndexInputs.add(f);
return f;
}

@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
return dir.createOutput(name, context);
}

@Override
public void close() throws IOException {
dir.close();
}

@Override
public void deleteFile(String name)
throws IOException
{
dir.deleteFile(name);
}
@Override
public String[] listAll()
throws IOException
{
return dir.listAll();
}
@Override
public void sync(Collection<String> names) throws IOException {
dir.sync(names);
}

@Override
public void renameFile(String source, String dest) throws IOException {
dir.renameFile(source, dest);
}

@Override
public long fileLength(String name) throws IOException {
return dir.fileLength(name);
}
}
}

@@ -17,17 +17,12 @@ package org.apache.lucene.store;
* limitations under the License.
*/

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;

import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;

public class TestDirectory extends BaseDirectoryTestCase {

@@ -63,9 +58,9 @@ public class TestDirectory extends BaseDirectoryTestCase {
}

final FSDirectory[] dirs = new FSDirectory[] {
new SimpleFSDirectory(path, null),
new NIOFSDirectory(path, null),
new MMapDirectory(path, null)
new SimpleFSDirectory(path),
new NIOFSDirectory(path),
new MMapDirectory(path)
};

for (int i=0; i<dirs.length; i++) {
@@ -142,11 +137,12 @@ public class TestDirectory extends BaseDirectoryTestCase {
}

// LUCENE-1468
@SuppressWarnings("resource")
public void testCopySubdir() throws Throwable {
Path path = createTempDir("testsubdir");
try {
Files.createDirectory(path.resolve("subdir"));
Directory fsDir = new SimpleFSDirectory(path, null);
Directory fsDir = new SimpleFSDirectory(path);
assertEquals(0, new RAMDirectory(fsDir, newIOContext(random())).listAll().length);
} finally {
IOUtils.rm(path);
@@ -156,13 +152,13 @@ public class TestDirectory extends BaseDirectoryTestCase {
// LUCENE-1468
public void testNotDirectory() throws Throwable {
Path path = createTempDir("testnotdir");
Directory fsDir = new SimpleFSDirectory(path, null);
Directory fsDir = new SimpleFSDirectory(path);
try {
IndexOutput out = fsDir.createOutput("afile", newIOContext(random()));
out.close();
assertTrue(slowFileExists(fsDir, "afile"));
try {
new SimpleFSDirectory(path.resolve("afile"), null);
new SimpleFSDirectory(path.resolve("afile"));
fail("did not hit expected exception");
} catch (IOException nsde) {
// Expected

@@ -45,12 +45,8 @@ public class TestLockFactory extends LuceneTestCase {
// methods are called at the right time, locks are created, etc.

public void testCustomLockFactory() throws IOException {
Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory());
MockLockFactory lf = new MockLockFactory();
dir.setLockFactory(lf);

// Lock prefix should have been set:
assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet);
Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory(lf));

IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));

@@ -78,11 +74,8 @@ public class TestLockFactory extends LuceneTestCase {
// exceptions raised:
// Verify: NoLockFactory allows two IndexWriters
public void testRAMDirectoryNoLocking() throws IOException {
MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory());
dir.setLockFactory(NoLockFactory.getNoLockFactory());
dir.setWrapLockFactory(false); // we are gonna explicitly test we get this back
assertTrue("RAMDirectory.setLockFactory did not take",
NoLockFactory.class.isInstance(dir.getLockFactory()));
MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(NoLockFactory.INSTANCE));
dir.setAssertLocks(false); // we are gonna explicitly test we get this back

IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
writer.commit(); // required so the second open succeed
@@ -105,10 +98,10 @@ public class TestLockFactory extends LuceneTestCase {
// Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
// Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
public void testDefaultRAMDirectory() throws IOException {
Directory dir = new RAMDirectory();
RAMDirectory dir = new RAMDirectory();

assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(),
SingleInstanceLockFactory.class.isInstance(dir.getLockFactory()));
assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.lockFactory,
dir.lockFactory instanceof SingleInstanceLockFactory);

IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));

@@ -141,7 +134,7 @@ public class TestLockFactory extends LuceneTestCase {
@Nightly
public void testStressLocksNativeFSLockFactory() throws Exception {
Path dir = createTempDir("index.TestLockFactory7");
_testStressLocks(new NativeFSLockFactory(dir), dir);
_testStressLocks(NativeFSLockFactory.INSTANCE, dir);
}

public void _testStressLocks(LockFactory lockFactory, Path indexDir) throws Exception {
@@ -171,11 +164,10 @@ public class TestLockFactory extends LuceneTestCase {

// Verify: NativeFSLockFactory works correctly
public void testNativeFSLockFactory() throws IOException {
NativeFSLockFactory f = new NativeFSLockFactory(createTempDir(LuceneTestCase.getTestClass().getSimpleName()));
Directory dir = FSDirectory.open(createTempDir(LuceneTestCase.getTestClass().getSimpleName()), NativeFSLockFactory.INSTANCE);

f.setLockPrefix("test");
Lock l = f.makeLock("commit");
Lock l2 = f.makeLock("commit");
Lock l = dir.makeLock("commit");
Lock l2 = dir.makeLock("commit");

assertTrue("failed to obtain lock", l.obtain());
assertTrue("succeeded in obtaining lock twice", !l2.obtain());
@@ -200,55 +192,14 @@ public class TestLockFactory extends LuceneTestCase {
Path lockFile = tempDir.resolve("test.lock");
Files.createFile(lockFile);

Lock l = new NativeFSLockFactory(tempDir).makeLock("test.lock");
Directory dir = FSDirectory.open(tempDir, NativeFSLockFactory.INSTANCE);
Lock l = dir.makeLock("test.lock");
assertTrue("failed to obtain lock", l.obtain());
l.close();
assertFalse("failed to release lock", l.isLocked());
Files.deleteIfExists(lockFile);
}

// Verify: NativeFSLockFactory assigns null as lockPrefix if the lockDir is inside directory
public void testNativeFSLockFactoryPrefix() throws IOException {

Path fdir1 = createTempDir("TestLockFactory.8");
Path fdir2 = createTempDir("TestLockFactory.8.Lockdir");
Directory dir1 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir1));
// same directory, but locks are stored somewhere else. The prefix of the lock factory should != null
Directory dir2 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir2));

String prefix1 = dir1.getLockFactory().getLockPrefix();
assertNull("Lock prefix for lockDir same as directory should be null", prefix1);

String prefix2 = dir2.getLockFactory().getLockPrefix();
assertNotNull("Lock prefix for lockDir outside of directory should be not null", prefix2);

dir1.close();
dir2.close();
IOUtils.rm(fdir1, fdir2);
}

// Verify: default LockFactory has no prefix (ie
// write.lock is stored in index):
public void testDefaultFSLockFactoryPrefix() throws IOException {

// Make sure we get null prefix, which wont happen if setLockFactory is ever called.
Path dirName = createTempDir("TestLockFactory.10");

Directory dir = new SimpleFSDirectory(dirName);
assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
dir.close();

dir = new MMapDirectory(dirName);
assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
dir.close();

dir = new NIOFSDirectory(dirName);
assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
dir.close();

IOUtils.rm(dirName);
}

private class WriterThread extends Thread {
private Directory dir;
private int numIteration;
@@ -349,29 +300,19 @@ public class TestLockFactory extends LuceneTestCase {
}
}

public class MockLockFactory extends LockFactory {
class MockLockFactory extends LockFactory {

public boolean lockPrefixSet;
public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
public int makeLockCount = 0;

@Override
public void setLockPrefix(String lockPrefix) {
super.setLockPrefix(lockPrefix);
lockPrefixSet = true;
}

@Override
synchronized public Lock makeLock(String lockName) {
public synchronized Lock makeLock(Directory dir, String lockName) {
Lock lock = new MockLock();
locksCreated.put(lockName, lock);
makeLockCount++;
return lock;
}

@Override
public void clearLock(String specificLockName) {}

public class MockLock extends Lock {
public int lockAttempts;

@@ -39,8 +39,7 @@ public class TestMockDirectoryWrapper extends LuceneTestCase {
}

public void testFailIfIndexWriterNotClosedChangeLockFactory() throws IOException {
MockDirectoryWrapper dir = newMockDirectory();
dir.setLockFactory(new SingleInstanceLockFactory());
MockDirectoryWrapper dir = newMockDirectory(random(), new SingleInstanceLockFactory());
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
try {
dir.close();

@@ -40,7 +40,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

@Override
protected Directory getDirectory(Path path) throws IOException {
return new MMapDirectory(path, null, 1<<TestUtil.nextInt(random(), 10, 28));
return new MMapDirectory(path, 1<<TestUtil.nextInt(random(), 10, 28));
}

@Override
@@ -178,7 +178,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSeekZero() throws Exception {
for (int i = 0; i < 31; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekZero"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekZero"), 1<<i);
IndexOutput io = mmapDir.createOutput("zeroBytes", newIOContext(random()));
io.close();
IndexInput ii = mmapDir.openInput("zeroBytes", newIOContext(random()));
@@ -190,7 +190,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSeekSliceZero() throws Exception {
for (int i = 0; i < 31; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceZero"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceZero"), 1<<i);
IndexOutput io = mmapDir.createOutput("zeroBytes", newIOContext(random()));
io.close();
IndexInput slicer = mmapDir.openInput("zeroBytes", newIOContext(random()));
@@ -204,7 +204,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSeekEnd() throws Exception {
for (int i = 0; i < 17; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekEnd"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekEnd"), 1<<i);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
byte bytes[] = new byte[1<<i];
random().nextBytes(bytes);
@@ -222,7 +222,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSeekSliceEnd() throws Exception {
for (int i = 0; i < 17; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceEnd"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceEnd"), 1<<i);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
byte bytes[] = new byte[1<<i];
random().nextBytes(bytes);
@@ -242,7 +242,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSeeking() throws Exception {
for (int i = 0; i < 10; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeeking"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeeking"), 1<<i);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
byte bytes[] = new byte[1<<(i+1)]; // make sure we switch buffers
random().nextBytes(bytes);
@@ -269,7 +269,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
// the various offset+length and just does readBytes.
public void testSlicedSeeking() throws Exception {
for (int i = 0; i < 10; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSlicedSeeking"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSlicedSeeking"), 1<<i);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
byte bytes[] = new byte[1<<(i+1)]; // make sure we switch buffers
random().nextBytes(bytes);
@@ -293,7 +293,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testSliceOfSlice() throws Exception {
for (int i = 0; i < 10; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSliceOfSlice"), null, 1<<i);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSliceOfSlice"), 1<<i);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
byte bytes[] = new byte[1<<(i+1)]; // make sure we switch buffers
random().nextBytes(bytes);
@@ -329,13 +329,14 @@ public class TestMultiMMap extends BaseDirectoryTestCase {

public void testRandomChunkSizes() throws Exception {
int num = atLeast(10);
for (int i = 0; i < num; i++)
for (int i = 0; i < num; i++) {
assertChunking(random(), TestUtil.nextInt(random(), 20, 100));
}
}

private void assertChunking(Random random, int chunkSize) throws Exception {
Path path = createTempDir("mmap" + chunkSize);
MMapDirectory mmapDir = new MMapDirectory(path, null, chunkSize);
MMapDirectory mmapDir = new MMapDirectory(path, chunkSize);
// we will map a lot, try to turn on the unmap hack
if (MMapDirectory.UNMAP_SUPPORTED)
mmapDir.setUseUnmap(true);
@@ -368,7 +369,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
public void testImplementations() throws Exception {
for (int i = 2; i < 12; i++) {
final int chunkSize = 1<<i;
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testImplementations"), null, chunkSize);
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testImplementations"), chunkSize);
IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random()));
int size = random().nextInt(chunkSize * 2) + 3; // add some buffer of 3 for slice tests
byte bytes[] = new byte[size];

@@ -65,7 +65,7 @@ public class TestWindowsMMap extends LuceneTestCase {
// may take some time until the files are finally dereferenced. So clean the
// directory up front, or otherwise new IndexWriter will fail.
Path dirPath = createTempDir("testLuceneMmap");
MMapDirectory dir = new MMapDirectory(dirPath, null);
MMapDirectory dir = new MMapDirectory(dirPath);

// plan to add a set of useful stopwords, consider changing some of the
// interior filters.

@@ -90,6 +90,7 @@ public class NativeUnixDirectory extends FSDirectory {
/** Create a new NIOFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory to use
* @param mergeBufferSize Size of buffer to use for
* merging. See {@link #DEFAULT_MERGE_BUFFER_SIZE}.
* @param minBytesDirect Merges, or files to be opened for
@@ -99,8 +100,8 @@ public class NativeUnixDirectory extends FSDirectory {
* @param delegate fallback Directory for non-merges
* @throws IOException If there is a low-level I/O error
*/
public NativeUnixDirectory(Path path, int mergeBufferSize, long minBytesDirect, Directory delegate) throws IOException {
super(path, delegate.getLockFactory());
public NativeUnixDirectory(Path path, int mergeBufferSize, long minBytesDirect, LockFactory lockFactory, Directory delegate) throws IOException {
super(path, lockFactory);
if ((mergeBufferSize & ALIGN) != 0) {
throw new IllegalArgumentException("mergeBufferSize must be 0 mod " + ALIGN + " (got: " + mergeBufferSize + ")");
}
@@ -110,13 +111,24 @@ public class NativeUnixDirectory extends FSDirectory {
}

/** Create a new NIOFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use
* @param delegate fallback Directory for non-merges
* @throws IOException If there is a low-level I/O error
*/
public NativeUnixDirectory(Path path, LockFactory lockFactory, Directory delegate) throws IOException {
this(path, DEFAULT_MERGE_BUFFER_SIZE, DEFAULT_MIN_BYTES_DIRECT, lockFactory, delegate);
}

/** Create a new NIOFSDirectory for the named location with {@link FSLockFactory#getDefault()}.
*
* @param path the path of the directory
* @param delegate fallback Directory for non-merges
* @throws IOException If there is a low-level I/O error
*/
public NativeUnixDirectory(Path path, Directory delegate) throws IOException {
this(path, DEFAULT_MERGE_BUFFER_SIZE, DEFAULT_MIN_BYTES_DIRECT, delegate);
this(path, DEFAULT_MERGE_BUFFER_SIZE, DEFAULT_MIN_BYTES_DIRECT, FSLockFactory.getDefault(), delegate);
}

@Override

@@ -40,8 +40,7 @@ public class RAFDirectory extends FSDirectory {
/** Create a new RAFDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @param lockFactory the lock factory to use
* @throws IOException if there is a low-level I/O error
*/
public RAFDirectory(Path path, LockFactory lockFactory) throws IOException {
@@ -49,14 +48,13 @@ public class RAFDirectory extends FSDirectory {
path.toFile(); // throw exception if we can't get a File
}

/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
/** Create a new SimpleFSDirectory for the named location and {@link FSLockFactory#getDefault()}.
*
* @param path the path of the directory
* @throws IOException if there is a low-level I/O error
*/
public RAFDirectory(Path path) throws IOException {
super(path, null);
path.toFile(); // throw exception if we can't get a File
this(path, FSLockFactory.getDefault());
}

/** Creates an IndexInput for the file with the given name. */

@@ -22,7 +22,6 @@ import java.io.EOFException;
import java.nio.file.Path;

import org.apache.lucene.store.Directory; // javadoc
import org.apache.lucene.store.NativeFSLockFactory; // javadoc

/**
* Native {@link Directory} implementation for Microsoft Windows.
@@ -52,21 +51,20 @@ public class WindowsDirectory extends FSDirectory {
/** Create a new WindowsDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @param lockFactory the lock factory to use
* @throws IOException If there is a low-level I/O error
*/
public WindowsDirectory(Path path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}

/** Create a new WindowsDirectory for the named location and {@link NativeFSLockFactory}.
/** Create a new WindowsDirectory for the named location and {@link FSLockFactory#getDefault()}.
*
* @param path the path of the directory
* @throws IOException If there is a low-level I/O error
*/
public WindowsDirectory(Path path) throws IOException {
super(path, null);
this(path, FSLockFactory.getDefault());
}

@Override

@@ -317,28 +317,6 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
dir.close();
}

// test that cfs reader is read-only
public void testClearLockDisabled() throws IOException {
final String testfile = "_123.test";

Directory dir = newDirectory();
IndexOutput out = dir.createOutput(testfile, IOContext.DEFAULT);
out.writeInt(3);
out.close();

SegmentInfo si = newSegmentInfo(dir, "_123");
si.getCodec().compoundFormat().write(dir, si, Collections.<String>emptyList(), MergeState.CheckAbort.NONE, IOContext.DEFAULT);
Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
try {
cfs.clearLock("foobar");
fail("didn't get expected exception");
} catch (UnsupportedOperationException expected) {
// expected UOE
}
cfs.close();
dir.close();
}

/**
* This test creates a compound file based on a large number of files of
* various length. The file content is generated randomly. The sizes range

@@ -74,7 +74,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
boolean assertNoDeleteOpenFile = false;
boolean preventDoubleWrite = true;
boolean trackDiskUsage = false;
boolean wrapLockFactory = true;
boolean wrapLocking = true;
boolean useSlowOpenClosers = true;
boolean enableVirusScanner = true;
boolean allowRandomFileNotFoundException = true;
@@ -86,7 +86,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
volatile boolean crashed;
private ThrottledIndexOutput throttledOutput;
private Throttling throttling = Throttling.SOMETIMES;
protected LockFactory lockFactory;

final AtomicInteger inputCloneCount = new AtomicInteger();

@@ -129,8 +128,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
this.randomState = new Random(random.nextInt());
this.throttledOutput = new ThrottledIndexOutput(ThrottledIndexOutput
.mBitsToBytes(40 + randomState.nextInt(10)), 5 + randomState.nextInt(5), null);
// force wrapping of lockfactory
this.lockFactory = new MockLockFactoryWrapper(this, delegate.getLockFactory());
init();
}

@@ -709,16 +706,16 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
}

/**
* Set to false if you want to return the pure lockfactory
* and not wrap it with MockLockFactoryWrapper.
* Set to false if you want to return the pure {@link LockFactory} and not
* wrap all lock with {@code AssertingLock}.
* <p>
* Be careful if you turn this off: MockDirectoryWrapper might
* no longer be able to detect if you forget to close an IndexWriter,
* Be careful if you turn this off: {@code MockDirectoryWrapper} might
* no longer be able to detect if you forget to close an {@link IndexWriter},
* and spit out horribly scary confusing exceptions instead of
* simply telling you that.
*/
public void setWrapLockFactory(boolean v) {
this.wrapLockFactory = v;
public void setAssertLocks(boolean v) {
this.wrapLocking = v;
}

@Override
@@ -987,39 +984,43 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
@Override
public synchronized Lock makeLock(String name) {
maybeYield();
return getLockFactory().makeLock(name);
}

@Override
public synchronized void clearLock(String name) throws IOException {
maybeYield();
getLockFactory().clearLock(name);
}

@Override
public synchronized void setLockFactory(LockFactory lockFactory) throws IOException {
maybeYield();
// sneaky: we must pass the original this way to the dir, because
// some impls (e.g. FSDir) do instanceof here.
in.setLockFactory(lockFactory);
// now set our wrapped factory here
this.lockFactory = new MockLockFactoryWrapper(this, lockFactory);
}

@Override
public synchronized LockFactory getLockFactory() {
maybeYield();
if (wrapLockFactory) {
return lockFactory;
if (wrapLocking) {
return new AssertingLock(super.makeLock(name), name);
} else {
return in.getLockFactory();
return super.makeLock(name);
}
}

private final class AssertingLock extends Lock {
private final Lock delegateLock;
private final String name;

AssertingLock(Lock delegate, String name) {
this.delegateLock = delegate;
this.name = name;
}

@Override
public synchronized String getLockID() {
maybeYield();
return in.getLockID();
@Override
public boolean obtain() throws IOException {
if (delegateLock.obtain()) {
assert delegateLock == NoLockFactory.SINGLETON_LOCK || !openLocks.containsKey(name);
openLocks.put(name, new RuntimeException("lock \"" + name + "\" was not released"));
return true;
} else {
return false;
}
}

@Override
public void close() throws IOException {
delegateLock.close();
openLocks.remove(name);
}

@Override
public boolean isLocked() throws IOException {
return delegateLock.isLocked();
}
}

@Override

@@ -1,92 +0,0 @@
package org.apache.lucene.store;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import java.io.IOException;

/**
* Used by MockDirectoryWrapper to wrap another factory
* and track open locks.
*/
public class MockLockFactoryWrapper extends LockFactory {
MockDirectoryWrapper dir;
LockFactory delegate;

public MockLockFactoryWrapper(MockDirectoryWrapper dir, LockFactory delegate) {
this.dir = dir;
this.delegate = delegate;
}

@Override
public void setLockPrefix(String lockPrefix) {
delegate.setLockPrefix(lockPrefix);
}

@Override
public String getLockPrefix() {
return delegate.getLockPrefix();
}

@Override
public Lock makeLock(String lockName) {
return new MockLock(delegate.makeLock(lockName), lockName);
}

@Override
public void clearLock(String lockName) throws IOException {
delegate.clearLock(lockName);
dir.openLocks.remove(lockName);
}

@Override
public String toString() {
return "MockLockFactoryWrapper(" + delegate.toString() + ")";
}

private class MockLock extends Lock {
private Lock delegateLock;
private String name;

MockLock(Lock delegate, String name) {
this.delegateLock = delegate;
this.name = name;
}

@Override
public boolean obtain() throws IOException {
if (delegateLock.obtain()) {
assert (delegate instanceof NoLockFactory) || dir.openLocks.containsKey(name) == false;
dir.openLocks.put(name, new RuntimeException("lock \"" + name + "\" was not released"));
return true;
} else {
return false;
}
}

@Override
public void close() throws IOException {
delegateLock.close();
dir.openLocks.remove(name);
}

@Override
public boolean isLocked() throws IOException {
return delegateLock.isLocked();
}
}
}

@@ -114,6 +114,7 @@ import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FSLockFactory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IOContext.Context;
@@ -1208,6 +1209,14 @@ public abstract class LuceneTestCase extends Assert {
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r));
}

/**
* Returns a new Directory instance, using the specified random.
* See {@link #newDirectory()} for more information.
*/
public static BaseDirectoryWrapper newDirectory(Random r, LockFactory lf) {
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), rarely(r));
}

public static MockDirectoryWrapper newMockDirectory() {
return newMockDirectory(random());
}
@@ -1216,8 +1225,16 @@ public abstract class LuceneTestCase extends Assert {
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false);
}

public static MockDirectoryWrapper newMockDirectory(Random r, LockFactory lf) {
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false);
}

public static MockDirectoryWrapper newMockFSDirectory(Path f) {
return (MockDirectoryWrapper) newFSDirectory(f, null, false);
return (MockDirectoryWrapper) newFSDirectory(f, FSLockFactory.getDefault(), false);
}

public static MockDirectoryWrapper newMockFSDirectory(Path f, LockFactory lf) {
return (MockDirectoryWrapper) newFSDirectory(f, lf, false);
}

/**
@@ -1231,7 +1248,7 @@ public abstract class LuceneTestCase extends Assert {

/** Returns a new FSDirectory instance over the given file, which must be a folder. */
public static BaseDirectoryWrapper newFSDirectory(Path f) {
return newFSDirectory(f, null);
return newFSDirectory(f, FSLockFactory.getDefault());
}

/** Returns a new FSDirectory instance over the given file, which must be a folder. */
@@ -1255,11 +1272,8 @@ public abstract class LuceneTestCase extends Assert {
clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
}

Directory fsdir = newFSDirectoryImpl(clazz, f);
Directory fsdir = newFSDirectoryImpl(clazz, f, lf);
BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare);
if (lf != null) {
wrapped.setLockFactory(lf);
}
return wrapped;
} catch (Exception e) {
Rethrow.rethrow(e);
@@ -1454,17 +1468,21 @@ public abstract class LuceneTestCase extends Assert {
}
}

private static Directory newFSDirectoryImpl(Class<? extends FSDirectory> clazz, Path path) throws IOException {
private static Directory newFSDirectoryImpl(Class<? extends FSDirectory> clazz, Path path, LockFactory lf) throws IOException {
FSDirectory d = null;
try {
d = CommandLineUtil.newFSDirectory(clazz, path);
} catch (NoSuchMethodException | InstantiationException | IllegalAccessException | InvocationTargetException e) {
d = CommandLineUtil.newFSDirectory(clazz, path, lf);
} catch (ReflectiveOperationException e) {
Rethrow.rethrow(e);
}
return d;
}

static Directory newDirectoryImpl(Random random, String clazzName) {
return newDirectoryImpl(random, clazzName, FSLockFactory.getDefault());
}

static Directory newDirectoryImpl(Random random, String clazzName, LockFactory lf) {
if (clazzName.equals("random")) {
if (rarely(random)) {
clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
@@ -1478,21 +1496,27 @@ public abstract class LuceneTestCase extends Assert {
// If it is a FSDirectory type, try its ctor(Path)
if (FSDirectory.class.isAssignableFrom(clazz)) {
final Path dir = createTempDir("index-" + clazzName);
return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), dir);
return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), dir, lf);
}

// See if it has a Path ctor even though it's not an
// See if it has a Path/LockFactory ctor even though it's not an
// FSDir subclass:
Constructor<? extends Directory> pathCtor = null;
try {
pathCtor = clazz.getConstructor(Path.class);
Constructor<? extends Directory> pathCtor = clazz.getConstructor(Path.class, LockFactory.class);
final Path dir = createTempDir("index");
return pathCtor.newInstance(dir, lf);
} catch (NoSuchMethodException nsme) {
// Ignore
}

if (pathCtor != null) {
final Path dir = createTempDir("index");
return pathCtor.newInstance(dir);

// the remaining dirs are no longer filesystem based, so we must check that the passedLockFactory is not file based:
if (!(lf instanceof FSLockFactory)) {
// try ctor with only LockFactory (e.g. RAMDirectory)
try {
return clazz.getConstructor(LockFactory.class).newInstance(lf);
} catch (NoSuchMethodException nsme) {
// Ignore
}
}

// try empty ctor

@@ -215,9 +215,9 @@ public final class TestUtil {
* look for any other corruption. */
public static CheckIndex.Status checkIndex(Directory dir, boolean crossCheckTermVectors, boolean failFast) throws IOException {
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
// TODO: actually use the dir's lock factory, unless test uses a special method?
// TODO: actually use the dir's locking, unless test uses a special method?
// some tests e.g. exception tests become much more complicated if they have to close the writer
try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.getNoLockFactory().makeLock("bogus"))) {
try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.makeLock(dir, "bogus"))) {
checker.setCrossCheckTermVectors(crossCheckTermVectors);
checker.setFailFast(failFast);
checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);

@@ -94,7 +94,7 @@ public class TreeMergeOutputFormat extends FileOutputFormat<Text, NullWritable>
writeShardNumberFile(context);
heartBeater.needHeartBeat();
try {
Directory mergedIndex = new HdfsDirectory(workDir, NoLockFactory.getNoLockFactory(), context.getConfiguration());
Directory mergedIndex = new HdfsDirectory(workDir, NoLockFactory.INSTANCE, context.getConfiguration());

// TODO: shouldn't we pull the Version from the solrconfig.xml?
IndexWriterConfig writerConfig = new IndexWriterConfig(null)
@@ -128,7 +128,7 @@ public class TreeMergeOutputFormat extends FileOutputFormat<Text, NullWritable>

Directory[] indexes = new Directory[shards.size()];
for (int i = 0; i < shards.size(); i++) {
indexes[i] = new HdfsDirectory(shards.get(i), NoLockFactory.getNoLockFactory(), context.getConfiguration());
indexes[i] = new HdfsDirectory(shards.get(i), NoLockFactory.INSTANCE, context.getConfiguration());
}

context.setStatus("Logically merging " + shards.size() + " shards into one shard");

@@ -347,7 +347,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
}

if (directory == null) {
directory = create(fullPath, createLockFactory(fullPath, rawLockType), dirContext);
directory = create(fullPath, createLockFactory(rawLockType), dirContext);
boolean success = false;
try {
directory = rateLimit(directory);

@@ -81,11 +81,10 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,

/**
* Creates a new LockFactory for a given path.
* @param lockPath the path of the index directory
* @param rawLockType A string value as passed in config. Every factory should at least support 'none' to disable locking.
* @throws IOException If there is a low-level I/O error.
*/
protected abstract LockFactory createLockFactory(String lockPath, String rawLockType) throws IOException;
protected abstract LockFactory createLockFactory(String rawLockType) throws IOException;

/**
* Returns true if a Directory exists for a given path.

@@ -17,25 +17,21 @@ package org.apache.solr.core;
* limitations under the License.
*/

import java.io.File;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;

import java.io.IOException;
import java.net.URI;
import java.net.URLEncoder;
import java.util.Locale;

import org.apache.hadoop.conf.Configuration;

import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
@@ -108,7 +104,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory {
}

@Override
protected LockFactory createLockFactory(String lockPath, String rawLockType) throws IOException {
protected LockFactory createLockFactory(String rawLockType) throws IOException {
if (null == rawLockType) {
LOG.warn("No lockType configured, assuming 'hdfs'.");
rawLockType = "hdfs";
@@ -116,11 +112,11 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory {
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
switch (lockType) {
case "hdfs":
return new HdfsLockFactory(new Path(lockPath), getConf());
return HdfsLockFactory.INSTANCE;
case "single":
return new SingleInstanceLockFactory();
case "none":
return NoLockFactory.getNoLockFactory();
return NoLockFactory.INSTANCE;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unrecognized lockType: " + rawLockType);

@@ -49,7 +49,7 @@ public class MMapDirectoryFactory extends StandardDirectoryFactory {
public void init(NamedList args) {
super.init(args);
SolrParams params = SolrParams.toSolrParams( args );
maxChunk = params.getInt("maxChunkSize", MMapDirectory.DEFAULT_MAX_BUFF);
maxChunk = params.getInt("maxChunkSize", MMapDirectory.DEFAULT_MAX_CHUNK_SIZE);
if (maxChunk <= 0){
throw new IllegalArgumentException("maxChunk must be greater than 0");
}

@@ -36,7 +36,7 @@ public class RAMDirectoryFactory extends EphemeralDirectoryFactory {
public static Logger LOG = LoggerFactory.getLogger(RAMDirectoryFactory.class);

@Override
protected LockFactory createLockFactory(String lockPath, String rawLockType) throws IOException {
protected LockFactory createLockFactory(String rawLockType) throws IOException {
if (!(rawLockType == null || "single".equalsIgnoreCase(rawLockType.trim()))) {
throw new SolrException(ErrorCode.FORBIDDEN,
"RAMDirectory can only be used with the 'single' lock factory type.");
@@ -46,9 +46,7 @@ public class RAMDirectoryFactory extends EphemeralDirectoryFactory {

@Override
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
final Directory dir = new RAMDirectory();
dir.setLockFactory(lockFactory); // more or less a no-op, just for completeness
return dir;
return new RAMDirectory(lockFactory);
}

}

@@ -32,7 +32,6 @@ import org.apache.lucene.store.RateLimitedDirectoryWrapper;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -55,7 +54,7 @@ public class StandardDirectoryFactory extends CachingDirectoryFactory {
}

@Override
protected LockFactory createLockFactory(String lockPath, String rawLockType) throws IOException {
protected LockFactory createLockFactory(String rawLockType) throws IOException {
if (null == rawLockType) {
// we default to "native"
log.warn("No lockType configured, assuming 'native'.");
@@ -64,13 +63,13 @@ public class StandardDirectoryFactory extends CachingDirectoryFactory {
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
switch (lockType) {
case "simple":
return new SimpleFSLockFactory(new File(lockPath).toPath());
return SimpleFSLockFactory.INSTANCE;
case "native":
return new NativeFSLockFactory(new File(lockPath).toPath());
return NativeFSLockFactory.INSTANCE;
case "single":
return new SingleInstanceLockFactory();
case "none":
return NoLockFactory.getNoLockFactory();
return NoLockFactory.INSTANCE;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unrecognized lockType: " + rawLockType);

@@ -32,6 +32,7 @@ import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;
@@ -52,10 +53,9 @@ public class SnapShooter {
  private static final Logger LOG = LoggerFactory.getLogger(SnapShooter.class.getName());
  private String snapDir = null;
  private SolrCore solrCore;
  private SimpleFSLockFactory lockFactory;
  private String snapshotName = null;
  private String directoryName = null;
  private File snapShotDir = null;
  private FSDirectory snapShotDir = null;
  private Lock lock = null;

  public SnapShooter(SolrCore core, String location, String snapshotName) {
@@ -67,11 +67,6 @@ public class SnapShooter {
      File dir = new File(snapDir);
      if (!dir.exists()) dir.mkdirs();
    }
    try {
      lockFactory = new SimpleFSLockFactory(new File(snapDir).toPath());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    this.snapshotName = snapshotName;

    if(snapshotName != null) {
@@ -122,19 +117,20 @@ public class SnapShooter {
  }

  void validateCreateSnapshot() throws IOException {
    Lock lock = lockFactory.makeLock(directoryName + ".lock");
    snapShotDir = new File(snapDir, directoryName);
    final File snapShotFile = new File(snapDir, directoryName);
    if (snapShotFile.exists()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Snapshot directory already exists: " + snapShotFile.getAbsolutePath());
    }
    if (!snapShotFile.mkdirs()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Unable to create snapshot directory: " + snapShotFile.getAbsolutePath());
    }
    snapShotDir = new SimpleFSDirectory(snapShotFile.toPath(), SimpleFSLockFactory.INSTANCE);
    Lock lock = snapShotDir.makeLock("write.lock");
    if (lock.isLocked()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Unable to acquire lock for snapshot directory: " + snapShotDir.getAbsolutePath());
    }
    if (snapShotDir.exists()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Snapshot directory already exists: " + snapShotDir.getAbsolutePath());
    }
    if (!snapShotDir.mkdirs()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Unable to create snapshot directory: " + snapShotDir.getAbsolutePath());
          "Unable to acquire lock for snapshot directory: " + snapShotFile.getAbsolutePath());
    }
  }

@@ -146,11 +142,10 @@ public class SnapShooter {

    try {
      Collection<String> files = indexCommit.getFileNames();
      FileCopier fileCopier = new FileCopier();

      Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
      try {
        fileCopier.copyFiles(dir, files, snapShotDir);
        copyFiles(dir, files, snapShotDir);
      } finally {
        solrCore.getDirectoryFactory().release(dir);
      }
@@ -161,7 +156,7 @@ public class SnapShooter {
      details.add("snapshotName", snapshotName);
      LOG.info("Done creating backup snapshot: " + (snapshotName == null ? "<not named>" : snapshotName));
    } catch (Exception e) {
      SnapPuller.delTree(snapShotDir);
      SnapPuller.delTree(snapShotDir.getDirectory().toFile());
      LOG.error("Exception while creating snapshot", e);
      details.add("snapShootException", e.getMessage());
    } finally {
@@ -245,35 +240,13 @@ public class SnapShooter {
  public static final String DATE_FMT = "yyyyMMddHHmmssSSS";


  private class FileCopier {

    public void copyFiles(Directory sourceDir, Collection<String> files,
        File destDir) throws IOException {
      // does destinations directory exist ?
      if (destDir != null && !destDir.exists()) {
        destDir.mkdirs();
      }

      FSDirectory dir = FSDirectory.open(destDir.toPath());
      try {
        for (String indexFile : files) {
          copyFile(sourceDir, indexFile, new File(destDir, indexFile), dir);
        }
      } finally {
        dir.close();
      }
    }

    public void copyFile(Directory sourceDir, String indexFile, File destination, Directory destDir)
        throws IOException {

      // make sure we can write to destination
      if (destination.exists() && !destination.canWrite()) {
        String message = "Unable to open file " + destination + " for writing.";
        throw new IOException(message);
      }

      sourceDir.copy(destDir, indexFile, indexFile, DirectoryFactory.IOCONTEXT_NO_CACHE);
  private void copyFiles(Directory sourceDir, Collection<String> files, Directory destDir) throws IOException {
    for (String indexFile : files) {
      copyFile(sourceDir, indexFile, destDir);
    }
  }

  private void copyFile(Directory sourceDir, String indexFile, Directory destDir) throws IOException {
    sourceDir.copy(destDir, indexFile, indexFile, DirectoryFactory.IOCONTEXT_NO_CACHE);
  }
}
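The SnapShooter rewrite applies the same idea to backups: the snapshot target is opened as a Directory with an explicit lock factory, the lock is requested from that Directory, and files move via Directory.copy. A condensed, hedged sketch of that flow; the paths, file list, and IOContext constant are placeholders (SnapShooter itself uses DirectoryFactory.IOCONTEXT_NO_CACHE):

import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collection;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;

public class SnapshotCopySketch {
  public static void main(String[] args) throws Exception {
    Directory source = FSDirectory.open(Paths.get("/tmp/index"));          // index to back up
    FSDirectory dest = new SimpleFSDirectory(Paths.get("/tmp/snapshot"),   // snapshot target
                                             SimpleFSLockFactory.INSTANCE);

    // The lock now comes from the destination Directory itself.
    Lock lock = dest.makeLock("write.lock");
    if (lock.isLocked()) {
      throw new IllegalStateException("snapshot directory is already locked");
    }

    Collection<String> files = Arrays.asList("segments_1");                // normally IndexCommit.getFileNames()
    for (String file : files) {
      source.copy(dest, file, file, IOContext.READONCE);                   // placeholder IOContext
    }

    dest.close();
    source.close();
  }
}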
@@ -20,17 +20,15 @@ package org.apache.solr.store.blockcache;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.Set;

import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.solr.store.hdfs.HdfsDirectory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,7 +36,7 @@ import org.slf4j.LoggerFactory;
/**
 * @lucene.experimental
 */
public class BlockDirectory extends Directory {
public class BlockDirectory extends FilterDirectory {
  public static Logger LOG = LoggerFactory.getLogger(BlockDirectory.class);

  public static final long BLOCK_SHIFT = 13; // 2^13 = 8,192 bytes per block
@@ -86,7 +84,6 @@ public class BlockDirectory extends Directory {
    public void releaseResources() {}
  };

  private final Directory directory;
  private final int blockSize;
  private final String dirName;
  private final Cache cache;
@@ -97,8 +94,8 @@ public class BlockDirectory extends Directory {
  public BlockDirectory(String dirName, Directory directory, Cache cache,
      Set<String> blockCacheFileTypes, boolean blockCacheReadEnabled,
      boolean blockCacheWriteEnabled) throws IOException {
    super(directory);
    this.dirName = dirName;
    this.directory = directory;
    blockSize = BLOCK_SIZE;
    this.cache = cache;
    if (blockCacheFileTypes == null || blockCacheFileTypes.isEmpty()) {
@@ -114,14 +111,11 @@ public class BlockDirectory extends Directory {
    if (!blockCacheWriteEnabled) {
      LOG.info("Block cache on write is disabled");
    }
    if (directory.getLockFactory() != null) {
      setLockFactory(directory.getLockFactory());
    }
  }

  private IndexInput openInput(String name, int bufferSize, IOContext context)
      throws IOException {
    final IndexInput source = directory.openInput(name, context);
    final IndexInput source = super.openInput(name, context);
    if (useReadCache(name, context)) {
      return new CachedIndexInput(source, blockSize, name,
          getFileCacheName(name), cache, bufferSize);
@@ -241,7 +235,7 @@ public class BlockDirectory extends Directory {
    } catch (FileNotFoundException e) {
      // the local file system folder may be gone
    } finally {
      directory.close();
      super.close();
      cache.releaseResources();
    }
  }
@@ -251,24 +245,20 @@ public class BlockDirectory extends Directory {
  }

  private long getFileModified(String name) throws IOException {
    if (directory instanceof FSDirectory) {
      File directory = ((FSDirectory) this.directory).getDirectory().toFile();
    if (in instanceof FSDirectory) {
      File directory = ((FSDirectory) in).getDirectory().toFile();
      File file = new File(directory, name);
      if (!file.exists()) {
        throw new FileNotFoundException("File [" + name + "] not found");
      }
      return file.lastModified();
    } else if (directory instanceof HdfsDirectory) {
      return ((HdfsDirectory) directory).fileModified(name);
    } else if (in instanceof HdfsDirectory) {
      return ((HdfsDirectory) in).fileModified(name);
    } else {
      throw new RuntimeException("Not supported");
      throw new UnsupportedOperationException();
    }
  }

  public void clearLock(String name) throws IOException {
    directory.clearLock(name);
  }

  String getFileCacheLocation(String name) {
    return dirName + "/" + name;
  }
@@ -282,42 +272,6 @@ public class BlockDirectory extends Directory {
    return cache;
  }

  @Override
  public void copy(Directory to, String src, String dest, IOContext context)
      throws IOException {
    directory.copy(to, src, dest, context);
  }

  public LockFactory getLockFactory() {
    return directory.getLockFactory();
  }

  public String getLockID() {
    return directory.getLockID();
  }

  public Lock makeLock(String name) {
    return directory.makeLock(name);
  }

  public void setLockFactory(LockFactory lockFactory) throws IOException {
    directory.setLockFactory(lockFactory);
  }

  @Override
  public void sync(Collection<String> names) throws IOException {
    directory.sync(names);
  }

  // @SuppressWarnings("deprecation")
  // public void sync(String name) throws IOException {
  // _directory.sync(name);
  // }

  public String toString() {
    return directory.toString();
  }

  /**
   * Determine whether read caching should be used for a particular
   * file/context.
@@ -363,47 +317,18 @@ public class BlockDirectory extends Directory {
  @Override
  public IndexOutput createOutput(String name, IOContext context)
      throws IOException {
    IndexOutput dest = directory.createOutput(name, context);
    final IndexOutput dest = super.createOutput(name, context);
    if (useWriteCache(name, context)) {
      return new CachedIndexOutput(this, dest, blockSize, name, cache,
          blockSize);
      return new CachedIndexOutput(this, dest, blockSize, name, cache, blockSize);
    }
    return dest;
  }

  public void deleteFile(String name) throws IOException {
    cache.delete(getFileCacheName(name));
    directory.deleteFile(name);
    super.deleteFile(name);
  }

  @Override
  public void renameFile(String source, String dest) throws IOException {
    directory.renameFile(source, dest);
  }

  public long fileLength(String name) throws IOException {
    return directory.fileLength(name);
  }

  // @SuppressWarnings("deprecation")
  // public long fileModified(String name) throws IOException {
  // return _directory.fileModified(name);
  // }

  public String[] listAll() throws IOException {
    return directory.listAll();
  }

  // @SuppressWarnings("deprecation")
  // public void touchFile(String name) throws IOException {
  // _directory.touchFile(name);
  // }

  public Directory getDirectory() {
    return directory;
  }


  public boolean isBlockCacheReadEnabled() {
    return blockCacheReadEnabled;
  }
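BlockDirectory sheds its hand-written delegation (copy, sync, listAll, the lock methods, and so on) by extending FilterDirectory, which forwards everything to the wrapped instance exposed as the protected field in. A small sketch of that idiom; the wrapper class and its logging are purely illustrative:

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

// A minimal FilterDirectory sketch: only the methods you want to intercept are
// overridden; everything else (locking included) is inherited delegation to `in`.
public class LoggingDirectory extends FilterDirectory {

  public LoggingDirectory(Directory delegate) {
    super(delegate);  // `in` now refers to the wrapped Directory
  }

  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    System.out.println("openInput(" + name + ")");
    return super.openInput(name, context);  // same delegation call BlockDirectory now uses
  }
}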
@@ -45,15 +45,19 @@ public class HdfsDirectory extends BaseDirectory {
  public static final int BUFFER_SIZE = 8192;

  private static final String LF_EXT = ".lf";
  protected Path hdfsDirPath;
  protected Configuration configuration;
  protected final Path hdfsDirPath;
  protected final Configuration configuration;

  private final FileSystem fileSystem;
  private final FileContext fileContext;

  public HdfsDirectory(Path hdfsDirPath, Configuration configuration) throws IOException {
    this(hdfsDirPath, HdfsLockFactory.INSTANCE, configuration);
  }

  public HdfsDirectory(Path hdfsDirPath, LockFactory lockFactory, Configuration configuration)
      throws IOException {
    setLockFactory(lockFactory);
    super(lockFactory);
    this.hdfsDirPath = hdfsDirPath;
    this.configuration = configuration;
    fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), configuration);
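HdfsDirectory now routes its LockFactory through the BaseDirectory constructor, defaulting to HdfsLockFactory.INSTANCE. A hedged usage sketch built only from the constructors visible in this diff; the HDFS URIs are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.store.NoLockFactory;
import org.apache.solr.store.hdfs.HdfsDirectory;

public class HdfsDirectorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // The two-argument constructor wires in the HdfsLockFactory singleton.
    HdfsDirectory locked = new HdfsDirectory(new Path("hdfs://namenode/solr/index"), conf);

    // Callers that do not want locking pass NoLockFactory.INSTANCE,
    // as HdfsDirectoryTest does later in this diff.
    HdfsDirectory unlocked = new HdfsDirectory(new Path("hdfs://namenode/solr/scratch"),
                                               NoLockFactory.INSTANCE, conf);

    locked.close();
    unlocked.close();
  }
}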
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.LockReleaseFailedException;
@@ -35,76 +36,24 @@ import org.slf4j.LoggerFactory;
public class HdfsLockFactory extends LockFactory {
  public static Logger log = LoggerFactory.getLogger(HdfsLockFactory.class);

  private Path lockPath;
  private Configuration configuration;
  public static final HdfsLockFactory INSTANCE = new HdfsLockFactory();

  public HdfsLockFactory(Path lockPath, Configuration configuration) {
    this.lockPath = lockPath;
    this.configuration = configuration;
  }
  private HdfsLockFactory() {}

  @Override
  public Lock makeLock(String lockName) {

    if (lockPrefix != null) {
      lockName = lockPrefix + "-" + lockName;
  public Lock makeLock(Directory dir, String lockName) {
    if (!(dir instanceof HdfsDirectory)) {
      throw new UnsupportedOperationException("HdfsLockFactory can only be used with HdfsDirectory subclasses, got: " + dir);
    }

    HdfsLock lock = new HdfsLock(lockPath, lockName, configuration);

    return lock;
  }

  @Override
  public void clearLock(String lockName) throws IOException {
    FileSystem fs = null;
    try {
      fs = FileSystem.newInstance(lockPath.toUri(), configuration);
      while (true) {
        if (fs.exists(lockPath)) {
          if (lockPrefix != null) {
            lockName = lockPrefix + "-" + lockName;
          }

          Path lockFile = new Path(lockPath, lockName);
          try {
            if (fs.exists(lockFile) && !fs.delete(lockFile, false)) {
              throw new IOException("Cannot delete " + lockFile);
            }
          } catch (RemoteException e) {
            if (e.getClassName().equals(
                "org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
              log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
              try {
                Thread.sleep(5000);
              } catch (InterruptedException e1) {
                Thread.interrupted();
              }
              continue;
            }
            throw e;
          }
          break;
        }
      }
    } finally {
      IOUtils.closeQuietly(fs);
    }
  }

  public Path getLockPath() {
    return lockPath;
  }

  public void setLockPath(Path lockPath) {
    this.lockPath = lockPath;
    final HdfsDirectory hdfsDir = (HdfsDirectory) dir;
    return new HdfsLock(hdfsDir.getHdfsDirPath(), lockName, hdfsDir.getConfiguration());
  }

  static class HdfsLock extends Lock {

    private Path lockPath;
    private String lockName;
    private Configuration conf;
    private final Path lockPath;
    private final String lockName;
    private final Configuration conf;

    public HdfsLock(Path lockPath, String lockName, Configuration conf) {
      this.lockPath = lockPath;
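HdfsLockFactory illustrates the new LockFactory contract: the factory keeps no per-directory state, is exposed as a singleton, receives the Directory in makeLock so it can derive the lock location from it, and no longer provides clearLock. A hedged sketch of a custom factory written against that contract; the no-op behaviour is invented for illustration, and the Lock methods shown are assumed from this revision's API:

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;

// Sketch of the new contract: a stateless, singleton LockFactory whose
// makeLock receives the Directory it should lock against.
public final class NoopLockFactory extends LockFactory {

  public static final NoopLockFactory INSTANCE = new NoopLockFactory();

  private NoopLockFactory() {}

  @Override
  public Lock makeLock(Directory dir, final String lockName) {
    // A real implementation would key the lock on `dir`, as HdfsLockFactory
    // does with the HDFS path and Configuration it pulls from HdfsDirectory.
    return new Lock() {
      @Override public boolean obtain() { return true; }     // never contended
      @Override public boolean isLocked() { return false; }
      @Override public void close() {}                        // nothing to release
      @Override public String toString() { return "NoopLock(" + lockName + ")"; }
    };
  }
}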
@@ -48,7 +48,7 @@ public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {
  @Test
  public void testSimpleLockErrorOnStartup() throws Exception {

    Directory directory = newFSDirectory(new File(initCoreDataDir, "index").toPath(), new SimpleFSLockFactory());
    Directory directory = newFSDirectory(new File(initCoreDataDir, "index").toPath(), SimpleFSLockFactory.INSTANCE);
    //creates a new IndexWriter without releasing the lock yet
    IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(null));

@@ -74,7 +74,7 @@ public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {

    File indexDir = new File(initCoreDataDir, "index");
    log.info("Acquiring lock on {}", indexDir.getAbsolutePath());
    Directory directory = newFSDirectory(indexDir.toPath(), new NativeFSLockFactory());
    Directory directory = newFSDirectory(indexDir.toPath(), NativeFSLockFactory.INSTANCE);
    //creates a new IndexWriter without releasing the lock yet
    IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(null));
@@ -74,7 +74,7 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
    Configuration conf = new Configuration();
    conf.set("dfs.permissions.enabled", "false");

    directory = new HdfsDirectory(new Path(dfsCluster.getURI().toString() + createTempDir().toFile().getAbsolutePath() + "/hdfs"), NoLockFactory.getNoLockFactory(), conf);
    directory = new HdfsDirectory(new Path(dfsCluster.getURI().toString() + createTempDir().toFile().getAbsolutePath() + "/hdfs"), NoLockFactory.INSTANCE, conf);

    random = random();
  }
@@ -65,8 +65,9 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
  public void testBasic() throws IOException {
    URI uri = dfsCluster.getURI();
    Path lockPath = new Path(uri.toString(), "/basedir/lock");
    HdfsLockFactory lockFactory = new HdfsLockFactory(lockPath, new Configuration());
    Lock lock = lockFactory.makeLock("testlock");
    Configuration conf = new Configuration();
    HdfsDirectory dir = new HdfsDirectory(lockPath, conf);
    Lock lock = dir.makeLock("testlock");
    boolean success = lock.obtain();
    assertTrue("We could not get the lock when it should be available", success);
    success = lock.obtain();
@@ -76,6 +77,7 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
    assertTrue("We could not get the lock when it should be available", success);
    success = lock.obtain();
    assertFalse("We got the lock but it should be unavailble", success);
    dir.close();
  }
@@ -38,8 +38,8 @@ public class MockDirectoryFactory extends EphemeralDirectoryFactory {
  private boolean allowReadingFilesStillOpenForWrite = Boolean.getBoolean(SOLR_TESTS_ALLOW_READING_FILES_STILL_OPEN_FOR_WRITE);

  @Override
  protected LockFactory createLockFactory(String lockPath, String rawLockType) throws IOException {
    return NoLockFactory.getNoLockFactory(); // dummy, actually unused
  protected LockFactory createLockFactory(String rawLockType) throws IOException {
    return NoLockFactory.INSTANCE; // dummy, actually unused
  }

  @Override