mirror of https://github.com/apache/lucene.git
LUCENE-6508: Simplify directory/lock API
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1683606 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 44d4a8adb2
commit fe6c3dc939
@@ -54,6 +54,14 @@ New Features
   and queries, for fast "bbox/polygon contains lat/lon points" (Mike
   McCandless)

+API Changes
+
+* LUCENE-6508: Simplify Lock api, there is now just
+  Directory.obtainLock() which returns a Lock that can be
+  released (or fails with exception). Add lock verification
+  to IndexWriter. Improve exception messages when locking fails.
+  (Uwe Schindler, Mike McCandless, Robert Muir)
+
 Bug fixes

 * LUCENE-6500: ParallelCompositeReader did not always call
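A minimal sketch of what the new one-step API looks like from caller code (the index path and lock name here are placeholders; obtainLock() either returns an already-acquired Lock or throws):

    import java.io.IOException;
    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    public class ObtainLockExample {
      public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"))) {
          // obtainLock() acquires immediately or throws; there is no separate obtain() step
          try (Lock lock = dir.obtainLock("my.lock")) {
            // ... code to execute while locked ...
          } catch (LockObtainFailedException e) {
            // lock is held elsewhere (another process, or this JVM)
          }
        }
      }
    }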
@@ -151,7 +151,7 @@ public class SimpleTextCompoundFormat extends CompoundFormat {
       public void renameFile(String source, String dest) { throw new UnsupportedOperationException(); }

       @Override
-      public Lock makeLock(String name) { throw new UnsupportedOperationException(); }
+      public Lock obtainLock(String name) { throw new UnsupportedOperationException(); }
     };
   }

@@ -179,7 +179,7 @@ final class Lucene50CompoundReader extends Directory {
   }

   @Override
-  public Lock makeLock(String name) {
+  public Lock obtainLock(String name) {
     throw new UnsupportedOperationException();
   }

@@ -47,7 +47,6 @@ import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -356,7 +355,7 @@ public class CheckIndex implements Closeable {

   /** Create a new CheckIndex on the directory. */
   public CheckIndex(Directory dir) throws IOException {
-    this(dir, dir.makeLock(IndexWriter.WRITE_LOCK_NAME));
+    this(dir, dir.obtainLock(IndexWriter.WRITE_LOCK_NAME));
   }

   /**
@@ -370,9 +369,6 @@ public class CheckIndex implements Closeable {
     this.dir = dir;
     this.writeLock = writeLock;
     this.infoStream = null;
-    if (!writeLock.obtain(IndexWriterConfig.WRITE_LOCK_TIMEOUT)) { // obtain write lock
-      throw new LockObtainFailedException("Index locked for write: " + writeLock);
-    }
   }

   private void ensureOpen() {
@@ -95,6 +95,7 @@ import org.apache.lucene.util.InfoStream;
  */

 final class DocumentsWriter implements Closeable, Accountable {
+  private final Directory directoryOrig; // no wrapping, for infos
   private final Directory directory;

   private volatile boolean closed;
@@ -123,7 +124,8 @@ final class DocumentsWriter implements Closeable, Accountable {
   private final Queue<Event> events;


-  DocumentsWriter(IndexWriter writer, LiveIndexWriterConfig config, Directory directory) {
+  DocumentsWriter(IndexWriter writer, LiveIndexWriterConfig config, Directory directoryOrig, Directory directory) {
+    this.directoryOrig = directoryOrig;
     this.directory = directory;
     this.config = config;
     this.infoStream = config.getInfoStream();
@@ -393,7 +395,7 @@ final class DocumentsWriter implements Closeable, Accountable {
     if (state.isActive() && state.dwpt == null) {
       final FieldInfos.Builder infos = new FieldInfos.Builder(
           writer.globalFieldNumberMap);
-      state.dwpt = new DocumentsWriterPerThread(writer.newSegmentName(),
+      state.dwpt = new DocumentsWriterPerThread(writer.newSegmentName(), directoryOrig,
                                                 directory, config, infoStream, deleteQueue, infos,
                                                 writer.pendingNumDocs, writer.enableTestPoints);
     }
@@ -158,9 +158,9 @@ class DocumentsWriterPerThread {
   private final LiveIndexWriterConfig indexWriterConfig;
   private final boolean enableTestPoints;

-  public DocumentsWriterPerThread(String segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
+  public DocumentsWriterPerThread(String segmentName, Directory directoryOrig, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
                                   FieldInfos.Builder fieldInfos, AtomicLong pendingNumDocs, boolean enableTestPoints) throws IOException {
-    this.directoryOrig = directory;
+    this.directoryOrig = directoryOrig;
     this.directory = new TrackingDirectoryWrapper(directory);
     this.fieldInfos = fieldInfos;
     this.indexWriterConfig = indexWriterConfig;
@@ -102,8 +102,9 @@ final class IndexFileDeleter implements Closeable {
   private List<CommitPoint> commitsToDelete = new ArrayList<>();

   private final InfoStream infoStream;
-  private Directory directory;
-  private IndexDeletionPolicy policy;
+  private final Directory directoryOrig; // for commit point metadata
+  private final Directory directory;
+  private final IndexDeletionPolicy policy;

   final boolean startingCommitDeleted;
   private SegmentInfos lastSegmentInfos;
@@ -126,7 +127,7 @@ final class IndexFileDeleter implements Closeable {
    * any files not referenced by any of the commits.
    * @throws IOException if there is a low-level IO error
    */
-  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos,
+  public IndexFileDeleter(Directory directoryOrig, Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos,
                           InfoStream infoStream, IndexWriter writer, boolean initialIndexExists) throws IOException {
     Objects.requireNonNull(writer);
     this.infoStream = infoStream;
@@ -139,6 +140,7 @@ final class IndexFileDeleter implements Closeable {
     }

     this.policy = policy;
+    this.directoryOrig = directoryOrig;
     this.directory = directory;

     // First pass: walk the files and initialize our ref
@@ -165,7 +167,7 @@ final class IndexFileDeleter implements Closeable {
         }
         SegmentInfos sis = null;
         try {
-          sis = SegmentInfos.readCommit(directory, fileName);
+          sis = SegmentInfos.readCommit(directoryOrig, fileName);
         } catch (FileNotFoundException | NoSuchFileException e) {
           // LUCENE-948: on NFS (and maybe others), if
           // you have writers switching back and forth
@@ -179,7 +181,7 @@ final class IndexFileDeleter implements Closeable {
           }
         }
         if (sis != null) {
-          final CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
+          final CommitPoint commitPoint = new CommitPoint(commitsToDelete, directoryOrig, sis);
           if (sis.getGeneration() == segmentInfos.getGeneration()) {
             currentCommitPoint = commitPoint;
           }
@@ -205,14 +207,14 @@ final class IndexFileDeleter implements Closeable {
       // try now to explicitly open this commit point:
       SegmentInfos sis = null;
       try {
-        sis = SegmentInfos.readCommit(directory, currentSegmentsFile);
+        sis = SegmentInfos.readCommit(directoryOrig, currentSegmentsFile);
       } catch (IOException e) {
         throw new CorruptIndexException("unable to read current segments_N file", currentSegmentsFile, e);
       }
       if (infoStream.isEnabled("IFD")) {
         infoStream.message("IFD", "forced open of current segments file " + segmentInfos.getSegmentsFileName());
       }
-      currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
+      currentCommitPoint = new CommitPoint(commitsToDelete, directoryOrig, sis);
       commits.add(currentCommitPoint);
       incRef(sis, true);
     }
@@ -557,7 +559,7 @@ final class IndexFileDeleter implements Closeable {

     if (isCommit) {
       // Append to our commits list:
-      commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos));
+      commits.add(new CommitPoint(commitsToDelete, directoryOrig, segmentInfos));

       // Tell policy so it can remove commits:
       policy.onCommit(commits);
@@ -780,14 +782,14 @@ final class IndexFileDeleter implements Closeable {
     Collection<String> files;
     String segmentsFileName;
     boolean deleted;
-    Directory directory;
+    Directory directoryOrig;
     Collection<CommitPoint> commitsToDelete;
     long generation;
     final Map<String,String> userData;
     private final int segmentCount;

-    public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
-      this.directory = directory;
+    public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directoryOrig, SegmentInfos segmentInfos) throws IOException {
+      this.directoryOrig = directoryOrig;
       this.commitsToDelete = commitsToDelete;
       userData = segmentInfos.getUserData();
       segmentsFileName = segmentInfos.getSegmentsFileName();
@@ -818,7 +820,7 @@ final class IndexFileDeleter implements Closeable {

     @Override
     public Directory getDirectory() {
-      return directory;
+      return directoryOrig;
     }

     @Override
@@ -60,6 +60,7 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.MergeInfo;
 import org.apache.lucene.store.RateLimitedIndexOutput;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
+import org.apache.lucene.store.LockValidatingDirectoryWrapper;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -118,9 +119,7 @@ import org.apache.lucene.util.Version;

   <p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
   another <code>IndexWriter</code> on the same directory will lead to a
-  {@link LockObtainFailedException}. The {@link LockObtainFailedException}
-  is also thrown if an IndexReader on the same directory is used to delete documents
-  from the index.</p>
+  {@link LockObtainFailedException}.</p>

   <a name="deletionPolicy"></a>
   <p>Expert: <code>IndexWriter</code> allows an optional
@@ -254,8 +253,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
   // when unrecoverable disaster strikes, we populate this with the reason that we had to close IndexWriter
   volatile Throwable tragedy;

-  private final Directory directory; // where this index resides
-  private final Directory mergeDirectory; // used for merging
+  private final Directory directoryOrig; // original user directory
+  private final Directory directory; // wrapped with additional checks
+  private final Directory mergeDirectory; // wrapped with throttling: used for merging
   private final Analyzer analyzer; // how to analyze text

   private final AtomicLong changeCount = new AtomicLong(); // increments every time a change is completed
@@ -645,7 +645,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // Make sure no new readers can be opened if another thread just closed us:
     ensureOpen(false);

-    assert info.info.dir == directory: "info.dir=" + info.info.dir + " vs " + directory;
+    assert info.info.dir == directoryOrig: "info.dir=" + info.info.dir + " vs " + directoryOrig;

     ReadersAndUpdates rld = readerMap.get(info);
     if (rld == null) {
@@ -754,15 +754,30 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
   public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
     conf.setIndexWriter(this); // prevent reuse by other instances
     config = conf;
+    infoStream = config.getInfoStream();

-    directory = d;
+    // obtain the write.lock. If the user configured a timeout,
+    // we wrap with a sleeper and this might take some time.
+    long timeout = config.getWriteLockTimeout();
+    final Directory lockDir;
+    if (timeout == 0) {
+      // user doesn't want sleep/retries
+      lockDir = d;
+    } else {
+      lockDir = new SleepingLockWrapper(d, timeout);
+    }
+    writeLock = lockDir.obtainLock(WRITE_LOCK_NAME);
+
+    boolean success = false;
+    try {
+      directoryOrig = d;
+      directory = new LockValidatingDirectoryWrapper(d, writeLock);

       // Directory we use for merging, so we can abort running merges, and so
       // merge schedulers can optionally rate-limit per-merge IO:
-      mergeDirectory = addMergeRateLimiters(d);
+      mergeDirectory = addMergeRateLimiters(directory);

       analyzer = config.getAnalyzer();
-      infoStream = config.getInfoStream();
       mergeScheduler = config.getMergeScheduler();
       mergeScheduler.setInfoStream(infoStream);
       codec = config.getCodec();
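The constructor now takes write.lock before anything else, and the only knob is the config's write-lock timeout: zero means a single immediate attempt, anything else wraps the directory in the sleep/retry wrapper. A caller-side sketch (the analyzer choice and helper name are illustrative only):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;

    class OpenWriterSketch {
      static IndexWriter openFailFast(Directory dir) throws java.io.IOException {
        IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
        conf.setWriteLockTimeout(0); // timeout == 0 above: lockDir = d, no SleepingLockWrapper
        return new IndexWriter(dir, conf); // throws LockObtainFailedException if write.lock is held
      }
    }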
@@ -770,13 +785,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
       poolReaders = config.getReaderPooling();

-      writeLock = directory.makeLock(WRITE_LOCK_NAME);
-
-      if (!writeLock.obtain(config.getWriteLockTimeout())) // obtain write lock
-        throw new LockObtainFailedException("Index locked for write: " + writeLock);
-
-      boolean success = false;
-      try {
       OpenMode mode = config.getOpenMode();
       boolean create;
       if (mode == OpenMode.CREATE) {
@@ -822,7 +830,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {

       // Do not use SegmentInfos.read(Directory) since the spooky
       // retrying it does is not necessary here (we hold the write lock):
-      segmentInfos = SegmentInfos.readCommit(directory, lastSegmentsFile);
+      segmentInfos = SegmentInfos.readCommit(directoryOrig, lastSegmentsFile);

       IndexCommit commit = config.getIndexCommit();
       if (commit != null) {
@@ -831,9 +839,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         // preserve write-once. This is important if
         // readers are open against the future commit
         // points.
-        if (commit.getDirectory() != directory)
-          throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory");
-        SegmentInfos oldInfos = SegmentInfos.readCommit(directory, commit.getSegmentsFileName());
+        if (commit.getDirectory() != directoryOrig)
+          throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory, expected=" + directoryOrig + ", got=" + commit.getDirectory());
+        SegmentInfos oldInfos = SegmentInfos.readCommit(directoryOrig, commit.getSegmentsFileName());
         segmentInfos.replace(oldInfos);
         changed();
         if (infoStream.isEnabled("IW")) {
@@ -848,13 +856,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       // start with previous field numbers, but new FieldInfos
       globalFieldNumberMap = getFieldNumberMap();
       config.getFlushPolicy().init(config);
-      docWriter = new DocumentsWriter(this, config, directory);
+      docWriter = new DocumentsWriter(this, config, directoryOrig, directory);
       eventQueue = docWriter.eventQueue();

       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
       synchronized(this) {
-        deleter = new IndexFileDeleter(directory,
+        deleter = new IndexFileDeleter(directoryOrig, directory,
                                        config.getIndexDeletionPolicy(),
                                        segmentInfos, infoStream, this,
                                        initialIndexExists);
@@ -937,7 +945,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
   private void messageState() {
     if (infoStream.isEnabled("IW") && didMessageState == false) {
       didMessageState = true;
-      infoStream.message("IW", "\ndir=" + directory + "\n" +
+      infoStream.message("IW", "\ndir=" + directoryOrig + "\n" +
                                "index=" + segString() + "\n" +
                                "version=" + Version.LATEST.toString() + "\n" +
                                config.toString());
@@ -1036,7 +1044,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {

   /** Returns the Directory used by this index. */
   public Directory getDirectory() {
-    return directory;
+    // return the original directory the user supplied, unwrapped.
+    return directoryOrig;
   }

   /** Returns the analyzer used by this index. */
@@ -2274,7 +2283,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     for(int i=0;i<dirs.length;i++) {
       if (dups.contains(dirs[i]))
         throw new IllegalArgumentException("Directory " + dirs[i] + " appears more than once");
-      if (dirs[i] == directory)
+      if (dirs[i] == directoryOrig)
        throw new IllegalArgumentException("Cannot add directory to itself");
       dups.add(dirs[i]);
     }
@@ -2288,13 +2297,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     for(int i=0;i<dirs.length;i++) {
       boolean success = false;
       try {
-        Lock lock = dirs[i].makeLock(WRITE_LOCK_NAME);
+        Lock lock = dirs[i].obtainLock(WRITE_LOCK_NAME);
         locks.add(lock);
-        lock.obtain(config.getWriteLockTimeout());
         success = true;
       } finally {
         if (success == false) {
           // Release all previously acquired locks:
           // TODO: addSuppressed? it could be many...
           IOUtils.closeWhileHandlingException(locks);
         }
       }
@@ -2334,8 +2343,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
-   * @throws LockObtainFailedException if we were unable to
-   *   acquire the write lock in at least one directory
    * @throws IllegalArgumentException if addIndexes would cause
    *   the index to exceed {@link #MAX_DOCS}
    */
@@ -2496,7 +2503,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       // abortable so that IW.close(false) is able to stop it
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);

-      SegmentInfo info = new SegmentInfo(directory, Version.LATEST, mergedName, -1,
+      SegmentInfo info = new SegmentInfo(directoryOrig, Version.LATEST, mergedName, -1,
                                          false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());

       SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
@@ -2600,7 +2607,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {

     //System.out.println("copy seg=" + info.info.name + " version=" + info.info.getVersion());
     // Same SI as before but we change directory and name
-    SegmentInfo newInfo = new SegmentInfo(directory, info.info.getVersion(), segName, info.info.maxDoc(),
+    SegmentInfo newInfo = new SegmentInfo(directoryOrig, info.info.getVersion(), segName, info.info.maxDoc(),
                                           info.info.getUseCompoundFile(), info.info.getCodec(),
                                           info.info.getDiagnostics(), info.info.getId(), info.info.getAttributes());
     SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getDelGen(),
@@ -3075,7 +3082,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
   private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
     for(SegmentCommitInfo info : merge.segments) {
       if (!segmentInfos.contains(info)) {
-        throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directory);
+        throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directoryOrig);
       }
     }
   }
@@ -3599,7 +3606,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         }
         return false;
       }
-      if (info.info.dir != directory) {
+      if (info.info.dir != directoryOrig) {
        isExternal = true;
       }
       if (segmentsToMerge.containsKey(info)) {
@@ -3732,7 +3739,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // ConcurrentMergePolicy we keep deterministic segment
     // names.
     final String mergeSegmentName = newSegmentName();
-    SegmentInfo si = new SegmentInfo(directory, Version.LATEST, mergeSegmentName, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
+    SegmentInfo si = new SegmentInfo(directoryOrig, Version.LATEST, mergeSegmentName, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
     Map<String,String> details = new HashMap<>();
     details.put("mergeMaxNumSegments", "" + merge.maxNumSegments);
     details.put("mergeFactor", Integer.toString(merge.segments.size()));
@@ -4365,9 +4372,17 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
    * currently locked.
    * @param directory the directory to check for a lock
    * @throws IOException if there is a low-level IO error
+   * @deprecated Use of this method can only lead to race conditions. Try
+   *             to actually obtain a lock instead.
    */
+  @Deprecated
   public static boolean isLocked(Directory directory) throws IOException {
-    return directory.makeLock(WRITE_LOCK_NAME).isLocked();
+    try {
+      directory.obtainLock(WRITE_LOCK_NAME).close();
+      return false;
+    } catch (LockObtainFailedException failed) {
+      return true;
+    }
   }

   /** If {@link DirectoryReader#open(IndexWriter,boolean)} has
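The deprecation note names the problem precisely: any isLocked() answer can be stale by the time the caller acts on it. The race-free pattern is to attempt the lock and branch on the outcome, sketched here assuming an existing Directory dir:

    try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
      // we hold write.lock until this block exits
    } catch (LockObtainFailedException e) {
      // held elsewhere; there was never a check-then-act window
    }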
@@ -265,7 +265,8 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
   /**
    * Sets the maximum time to wait for a write lock (in milliseconds) for this
    * instance. You can change the default value for all instances by calling
-   * {@link #setDefaultWriteLockTimeout(long)}.
+   * {@link #setDefaultWriteLockTimeout(long)}. Note that the value can be zero,
+   * for no sleep/retry behavior.
    *
    * <p>Only takes effect when IndexWriter is first created. */
   public IndexWriterConfig setWriteLockTimeout(long writeLockTimeout) {
@@ -441,7 +441,6 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
         segnOutput.writeInt(e.getKey());
         segnOutput.writeSetOfStrings(e.getValue());
       }
-      assert si.dir == directory;
     }
     segnOutput.writeMapOfStrings(userData);
     CodecUtil.writeFooter(segnOutput);
@@ -0,0 +1,113 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+/**
+ * Directory that wraps another, and that sleeps and retries
+ * if obtaining the lock fails.
+ * <p>
+ * This is not a good idea.
+ */
+final class SleepingLockWrapper extends FilterDirectory {
+
+  /**
+   * Pass this lockWaitTimeout to try forever to obtain the lock.
+   */
+  public static final long LOCK_OBTAIN_WAIT_FOREVER = -1;
+
+  /**
+   * How long {@link #obtainLock} waits, in milliseconds,
+   * in between attempts to acquire the lock.
+   */
+  public static long DEFAULT_POLL_INTERVAL = 1000;
+
+  private final long lockWaitTimeout;
+  private final long pollInterval;
+
+  /**
+   * Create a new SleepingLockFactory
+   * @param delegate        underlying directory to wrap
+   * @param lockWaitTimeout length of time to wait in milliseconds
+   *                        or {@link #LOCK_OBTAIN_WAIT_FOREVER} to retry forever.
+   */
+  public SleepingLockWrapper(Directory delegate, long lockWaitTimeout) {
+    this(delegate, lockWaitTimeout, DEFAULT_POLL_INTERVAL);
+  }
+
+  /**
+   * Create a new SleepingLockFactory
+   * @param delegate        underlying directory to wrap
+   * @param lockWaitTimeout length of time to wait in milliseconds
+   *                        or {@link #LOCK_OBTAIN_WAIT_FOREVER} to retry forever.
+   * @param pollInterval    poll once per this interval in milliseconds until
+   *                        {@code lockWaitTimeout} is exceeded.
+   */
+  public SleepingLockWrapper(Directory delegate, long lockWaitTimeout, long pollInterval) {
+    super(delegate);
+    this.lockWaitTimeout = lockWaitTimeout;
+    this.pollInterval = pollInterval;
+    if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER) {
+      throw new IllegalArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
+    }
+    if (pollInterval < 0) {
+      throw new IllegalArgumentException("pollInterval must be a non-negative number (got " + pollInterval + ")");
+    }
+  }
+
+  @Override
+  public Lock obtainLock(String lockName) throws IOException {
+    LockObtainFailedException failureReason = null;
+    long maxSleepCount = lockWaitTimeout / pollInterval;
+    long sleepCount = 0;
+
+    do {
+      try {
+        return in.obtainLock(lockName);
+      } catch (LockObtainFailedException failed) {
+        if (failureReason == null) {
+          failureReason = failed;
+        }
+      }
+      try {
+        Thread.sleep(pollInterval);
+      } catch (InterruptedException ie) {
+        throw new ThreadInterruptedException(ie);
+      }
+    } while (sleepCount++ < maxSleepCount || lockWaitTimeout == LOCK_OBTAIN_WAIT_FOREVER);
+
+    // we failed to obtain the lock in the required time
+    String reason = "Lock obtain timed out: " + this.toString();
+    if (failureReason != null) {
+      reason += ": " + failureReason;
+    }
+    throw new LockObtainFailedException(reason, failureReason);
+  }
+
+  @Override
+  public String toString() {
+    return "SleepingLockWrapper(" + in + ")";
+  }
+}
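With the defaults above, a lockWaitTimeout of 5000 ms allows 5000 / 1000 = 5 sleeps before the failure propagates, and the first underlying LockObtainFailedException is preserved as the cause. A usage sketch (the class is package-private, so only org.apache.lucene.index code such as IndexWriter can do this; fsDir is an assumed existing Directory):

    Directory sleepy = new SleepingLockWrapper(fsDir, 5000); // retry for up to ~5 seconds
    try (Lock lock = sleepy.obtainLock("write.lock")) {
      // acquired within the window; on timeout a LockObtainFailedException
      // is thrown with the first failure attached as its cause
    }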
@@ -17,6 +17,7 @@ package org.apache.lucene.store;
  * limitations under the License.
  */

+import java.io.IOException;

 /**
  * Base implementation for a concrete {@link Directory} that uses a {@link LockFactory} for locking.
@@ -40,8 +41,8 @@ public abstract class BaseDirectory extends Directory {
   }

   @Override
-  public final Lock makeLock(String name) {
-    return lockFactory.makeLock(this, name);
+  public final Lock obtainLock(String name) throws IOException {
+    return lockFactory.obtainLock(this, name);
   }

   @Override
@@ -50,8 +50,7 @@ public abstract class Directory implements Closeable {
   public abstract String[] listAll() throws IOException;

   /** Removes an existing file in the directory. */
-  public abstract void deleteFile(String name)
-       throws IOException;
+  public abstract void deleteFile(String name) throws IOException;

   /**
    * Returns the length of a file in the directory. This method follows the
@@ -110,10 +109,14 @@ public abstract class Directory implements Closeable {
     return new BufferedChecksumIndexInput(openInput(name, context));
   }

-  /** Construct a {@link Lock}.
+  /**
+   * Returns an obtained {@link Lock}.
    * @param name the name of the lock file
+   * @throws LockObtainFailedException (optional specific exception) if the lock could
+   *         not be obtained because it is currently held elsewhere.
+   * @throws IOException if any i/o error occurs attempting to gain the lock
    */
-  public abstract Lock makeLock(String name);
+  public abstract Lock obtainLock(String name) throws IOException;

   /** Closes the store. */
   @Override
@@ -17,6 +17,8 @@ package org.apache.lucene.store;
  * limitations under the License.
  */

+import java.io.IOException;
+
 /**
  * Base class for file system based locking implementation.
  * This class is explicitly checking that the passed {@link Directory}
@@ -32,14 +34,17 @@ public abstract class FSLockFactory extends LockFactory {
   }

   @Override
-  public final Lock makeLock(Directory dir, String lockName) {
+  public final Lock obtainLock(Directory dir, String lockName) throws IOException {
     if (!(dir instanceof FSDirectory)) {
       throw new UnsupportedOperationException(getClass().getSimpleName() + " can only be used with FSDirectory subclasses, got: " + dir);
     }
-    return makeFSLock((FSDirectory) dir, lockName);
+    return obtainFSLock((FSDirectory) dir, lockName);
   }

-  /** Implement this method to create a lock for a FSDirectory instance. */
-  protected abstract Lock makeFSLock(FSDirectory dir, String lockName);
+  /**
+   * Implement this method to obtain a lock for a FSDirectory instance.
+   * @throws IOException if the lock could not be obtained.
+   */
+  protected abstract Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException;

 }
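To illustrate the new template method, here is a hypothetical subclass (MyFSLockFactory is not a real Lucene class): the final obtainLock() validates the Directory type, and obtainFSLock() must either return an already-acquired Lock or throw:

    import java.io.IOException;
    import java.nio.file.FileAlreadyExistsException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.FSLockFactory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    // hypothetical example, sketched under the post-LUCENE-6508 API
    public final class MyFSLockFactory extends FSLockFactory {
      @Override
      protected Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException {
        final Path lockFile = dir.getDirectory().resolve(lockName);
        try {
          Files.createFile(lockFile); // atomic: fails if the file already exists
        } catch (FileAlreadyExistsException e) {
          throw new LockObtainFailedException("Lock held elsewhere: " + lockFile, e);
        }
        return new Lock() {
          @Override
          public void ensureValid() throws IOException {
            if (Files.notExists(lockFile)) {
              throw new IOException("lock file was removed externally: " + lockFile);
            }
          }
          @Override
          public void close() throws IOException {
            Files.deleteIfExists(lockFile);
          }
        };
      }
    }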
@@ -71,8 +71,8 @@ public class FileSwitchDirectory extends Directory {
   }

   @Override
-  public Lock makeLock(String name) {
-    return getDirectory(name).makeLock(name);
+  public Lock obtainLock(String name) throws IOException {
+    return getDirectory(name).obtainLock(name);
   }

   @Override
@@ -90,8 +90,8 @@ public class FilterDirectory extends Directory {
   }

   @Override
-  public Lock makeLock(String name) {
-    return in.makeLock(name);
+  public Lock obtainLock(String name) throws IOException {
+    return in.obtainLock(name);
   }

   @Override
@@ -20,126 +20,39 @@ package org.apache.lucene.store;
 import java.io.Closeable;
 import java.io.IOException;

-import org.apache.lucene.util.ThreadInterruptedException;
-
 /** An interprocess mutex lock.
  * <p>Typical use might look like:<pre class="prettyprint">
- * new Lock.With(directory.makeLock("my.lock")) {
- *     public Object doBody() {
- *       <i>... code to execute while locked ...</i>
+ *   try (final Lock lock = directory.obtainLock("my.lock")) {
+ *     // ... code to execute while locked ...
  *   }
- * }.run();
  * </pre>
  *
- * @see Directory#makeLock(String)
+ * @see Directory#obtainLock(String)
+ *
+ * @lucene.internal
  */
 public abstract class Lock implements Closeable {

-  /** How long {@link #obtain(long)} waits, in milliseconds,
-   *  in between attempts to acquire the lock. */
-  public static long LOCK_POLL_INTERVAL = 1000;
-
-  /** Pass this value to {@link #obtain(long)} to try
-   *  forever to obtain the lock. */
-  public static final long LOCK_OBTAIN_WAIT_FOREVER = -1;
-
-  /** Attempts to obtain exclusive access and immediately return
-   *  upon success or failure.  Use {@link #close} to
-   *  release the lock.
-   * @return true iff exclusive access is obtained
-   */
-  public abstract boolean obtain() throws IOException;
-
   /**
-   * If a lock obtain called, this failureReason may be set
-   * with the "root cause" Exception as to why the lock was
-   * not obtained.
+   * Releases exclusive access.
+   * <p>
+   * Note that exceptions thrown from close may require
+   * human intervention, as it may mean the lock was no
+   * longer valid, or that fs permissions prevent removal
+   * of the lock file, or other reasons.
+   * <p>
+   * {@inheritDoc}
+   * @throws LockReleaseFailedException (optional specific exception) if
+   *         the lock could not be properly released.
    */
-  protected Throwable failureReason;
-
-  /** Attempts to obtain an exclusive lock within amount of
-   *  time given. Polls once per {@link #LOCK_POLL_INTERVAL}
-   *  (currently 1000) milliseconds until lockWaitTimeout is
-   *  passed.
-   * @param lockWaitTimeout length of time to wait in
-   *        milliseconds or {@link
-   *        #LOCK_OBTAIN_WAIT_FOREVER} to retry forever
-   * @return true if lock was obtained
-   * @throws LockObtainFailedException if lock wait times out
-   * @throws IllegalArgumentException if lockWaitTimeout is
-   *         out of bounds
-   * @throws IOException if obtain() throws IOException
-   */
-  public final boolean obtain(long lockWaitTimeout) throws IOException {
-    failureReason = null;
-    boolean locked = obtain();
-    if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER)
-      throw new IllegalArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
-
-    long maxSleepCount = lockWaitTimeout / LOCK_POLL_INTERVAL;
-    long sleepCount = 0;
-    while (!locked) {
-      if (lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER && sleepCount++ >= maxSleepCount) {
-        String reason = "Lock obtain timed out: " + this.toString();
-        if (failureReason != null) {
-          reason += ": " + failureReason;
-        }
-        throw new LockObtainFailedException(reason, failureReason);
-      }
-      try {
-        Thread.sleep(LOCK_POLL_INTERVAL);
-      } catch (InterruptedException ie) {
-        throw new ThreadInterruptedException(ie);
-      }
-      locked = obtain();
-    }
-    return locked;
-  }
-
-  /** Releases exclusive access. */
   public abstract void close() throws IOException;

-  /** Returns true if the resource is currently locked.  Note that one must
-   *  still call {@link #obtain()} before using the resource. */
-  public abstract boolean isLocked() throws IOException;
-
-
-  /** Utility class for executing code with exclusive access. */
-  public abstract static class With {
-    private Lock lock;
-    private long lockWaitTimeout;
-
-
-    /** Constructs an executor that will grab the named lock. */
-    public With(Lock lock, long lockWaitTimeout) {
-      this.lock = lock;
-      this.lockWaitTimeout = lockWaitTimeout;
-    }
-
-    /** Code to execute with exclusive access. */
-    protected abstract Object doBody() throws IOException;
-
-    /** Calls {@link #doBody} while <i>lock</i> is obtained.  Blocks if lock
-     *  cannot be obtained immediately.  Retries to obtain lock once per second
-     *  until it is obtained, or until it has tried ten times. Lock is released when
-     *  {@link #doBody} exits.
-     * @throws LockObtainFailedException if lock could not
-     *         be obtained
-     * @throws IOException if {@link Lock#obtain} throws IOException
+  /**
+   * Best effort check that this lock is still valid. Locks
+   * could become invalidated externally for a number of reasons,
+   * for example if a user deletes the lock file manually or
+   * when a network filesystem is in use.
+   * @throws IOException if the lock is no longer valid.
    */
-    public Object run() throws IOException {
-      boolean locked = false;
-      try {
-        locked = lock.obtain(lockWaitTimeout);
-        return doBody();
-      } finally {
-        if (locked) {
-          lock.close();
-        }
-      }
-    }
-  }
-
+  public abstract void ensureValid() throws IOException;
 }
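The javadoc rewrite above is the whole API change in miniature. Side by side, as a sketch against some existing directory (the lock name is a placeholder):

    // before LUCENE-6508: construct the lock, then obtain with a timeout, then close
    Lock lock = directory.makeLock("my.lock");
    try {
      if (!lock.obtain(Lock.LOCK_OBTAIN_WAIT_FOREVER)) {
        throw new IOException("could not acquire my.lock");
      }
      // ... code to execute while locked ...
    } finally {
      lock.close();
    }

    // after: obtainLock() only returns once the lock is held
    try (Lock lock = directory.obtainLock("my.lock")) {
      lock.ensureValid(); // optional best-effort check before destructive work
      // ... code to execute while locked ...
    }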
@@ -17,6 +17,7 @@ package org.apache.lucene.store;
  * limitations under the License.
  */

+import java.io.IOException;

 /**
  * <p>Base class for Locking implementation.  {@link Directory} uses
@@ -46,9 +47,12 @@ package org.apache.lucene.store;
 public abstract class LockFactory {

   /**
-   * Return a new Lock instance identified by lockName.
+   * Return a new obtained Lock instance identified by lockName.
    * @param lockName name of the lock to be created.
+   * @throws LockObtainFailedException (optional specific exception) if the lock could
+   *         not be obtained because it is currently held elsewhere.
+   * @throws IOException if any i/o error occurs attempting to gain the lock
    */
-  public abstract Lock makeLock(Directory dir, String lockName);
+  public abstract Lock obtainLock(Directory dir, String lockName) throws IOException;

 }
@@ -24,7 +24,7 @@ import java.io.IOException;
  * could not be acquired. This
  * happens when a writer tries to open an index
  * that another writer already has open.
- * @see Lock#obtain(long)
+ * @see LockFactory#obtainLock(Directory, String)
  */
 public class LockObtainFailedException extends IOException {
   public LockObtainFailedException(String message) {
@@ -39,9 +39,11 @@ import org.apache.lucene.util.SuppressForbidden;

 public class LockStressTest {

-  @SuppressForbidden(reason = "System.out required: command line tool")
-  public static void main(String[] args) throws Exception {
+  static final String LOCK_FILE_NAME = "test.lock";
+
+  @SuppressForbidden(reason = "System.out required: command line tool")
+  @SuppressWarnings("try")
+  public static void main(String[] args) throws Exception {
     if (args.length != 7) {
       System.out.println("Usage: java org.apache.lucene.store.LockStressTest myID verifierHost verifierPort lockFactoryClassName lockDirName sleepTimeMS count\n" +
                          "\n" +
@@ -91,7 +93,6 @@ public class LockStressTest {
     out.write(myID);
     out.flush();
     LockFactory verifyLF = new VerifyingLockFactory(lockFactory, in, out);
-    Lock l = verifyLF.makeLock(lockDir, "test.lock");
     final Random rnd = new Random();

     // wait for starting gun
@@ -100,23 +101,20 @@ public class LockStressTest {
     }

     for (int i = 0; i < count; i++) {
-      boolean obtained = false;
-      try {
-        obtained = l.obtain(rnd.nextInt(100) + 10);
-      } catch (LockObtainFailedException e) {}
-
-      if (obtained) {
+      try (final Lock l = verifyLF.obtainLock(lockDir, LOCK_FILE_NAME)) {
         if (rnd.nextInt(10) == 0) {
           if (rnd.nextBoolean()) {
             verifyLF = new VerifyingLockFactory(getNewLockFactory(lockFactoryClassName), in, out);
           }
-          final Lock secondLock = verifyLF.makeLock(lockDir, "test.lock");
-          if (secondLock.obtain()) {
-            throw new IOException("Double Obtain");
+          try (final Lock secondLock = verifyLF.obtainLock(lockDir, LOCK_FILE_NAME)) {
+            throw new IOException("Double obtain");
+          } catch (LockObtainFailedException loe) {
+            // pass
           }
         }
         Thread.sleep(sleepTimeMS);
-        l.close();
+      } catch (LockObtainFailedException loe) {
+        // obtain failed
       }

       if (i % 500 == 0) {
@@ -0,0 +1,64 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * This class makes a best-effort check that a provided {@link Lock}
+ * is valid before any destructive filesystem operation.
+ */
+public final class LockValidatingDirectoryWrapper extends FilterDirectory {
+  private final Lock writeLock;
+
+  public LockValidatingDirectoryWrapper(Directory in, Lock writeLock) {
+    super(in);
+    this.writeLock = writeLock;
+  }
+
+  @Override
+  public void deleteFile(String name) throws IOException {
+    writeLock.ensureValid();
+    in.deleteFile(name);
+  }
+
+  @Override
+  public IndexOutput createOutput(String name, IOContext context) throws IOException {
+    writeLock.ensureValid();
+    return in.createOutput(name, context);
+  }
+
+  @Override
+  public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException {
+    writeLock.ensureValid();
+    in.copyFrom(from, src, dest, context);
+  }
+
+  @Override
+  public void renameFile(String source, String dest) throws IOException {
+    writeLock.ensureValid();
+    in.renameFile(source, dest);
+  }
+
+  @Override
+  public void sync(Collection<String> names) throws IOException {
+    writeLock.ensureValid();
+    in.sync(names);
+  }
+}
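This wrapper is the "lock verification" the CHANGES entry mentions: every destructive call is preceded by ensureValid(), so a lock lost after acquisition surfaces as an exception on the next write instead of as silent corruption. A sketch (the output file name is illustrative):

    Lock writeLock = dir.obtainLock("write.lock");
    Directory guarded = new LockValidatingDirectoryWrapper(dir, writeLock);
    // if the lock file is deleted or invalidated externally after this point,
    // the next createOutput/deleteFile/renameFile/sync/copyFrom throws instead
    IndexOutput out = guarded.createOutput("_0.tmp", IOContext.DEFAULT);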
@@ -18,10 +18,12 @@ package org.apache.lucene.store;
  */

 import java.nio.channels.FileChannel;
-import java.nio.channels.OverlappingFileLockException;
+import java.nio.channels.FileLock;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -77,136 +79,127 @@ public final class NativeFSLockFactory extends FSLockFactory {
   */
  public static final NativeFSLockFactory INSTANCE = new NativeFSLockFactory();

+  private static final Set<String> LOCK_HELD = Collections.synchronizedSet(new HashSet<String>());
+
  private NativeFSLockFactory() {}

  @Override
-  protected Lock makeFSLock(FSDirectory dir, String lockName) {
-    return new NativeFSLock(dir.getDirectory(), lockName);
-  }
-
-  static final class NativeFSLock extends Lock {
-
-    private final Path path;
-    private final Path lockDir;
-    private static final Set<String> LOCK_HELD = Collections.synchronizedSet(new HashSet<String>());
-
-    private FileChannel channel; // set when we have the lock
-    private Path realPath; // unconditionally set in obtain(), for use in close()
-
-    public NativeFSLock(Path lockDir, String lockFileName) {
-      this.lockDir = lockDir;
-      path = lockDir.resolve(lockFileName);
-    }
-
-    @Override
-    public synchronized boolean obtain() throws IOException {
-
-      if (channel != null) {
-        // Our instance is already locked:
-        assert channel.isOpen();
-        assert realPath != null;
-        throw new LockObtainFailedException("this lock instance was already obtained");
-      }
+  protected Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException {
+    Path lockDir = dir.getDirectory();

-      // Ensure that lockDir exists and is a directory.
-      // note: this will fail if lockDir is a symlink
-      Files.createDirectories(lockDir);
-      try {
-        Files.createFile(path);
-      } catch (IOException ignore) {
-        // we must create the file to have a truly canonical path.
-        // if it's already created, we don't care. if it cant be created, it will fail below.
-      }
-      realPath = path.toRealPath();
-      // Make sure nobody else in-process has this lock held
-      // already, and, mark it held if not:
-      // This is a pretty crazy workaround for some documented
-      // but yet awkward JVM behavior:
-      //
-      //   On some systems, closing a channel releases all locks held by the Java virtual machine on the underlying file
-      //   regardless of whether the locks were acquired via that channel or via another channel open on the same file.
-      //   It is strongly recommended that, within a program, a unique channel be used to acquire all locks on any given
-      //   file.
-      //
-      // This essentially means if we close "A" channel for a given file all locks might be released... the odd part
-      // is that we can't re-obtain the lock in the same JVM but from a different process if that happens. Nevertheless
-      // this is super trappy. See LUCENE-5738
-      boolean obtained = false;
-
-      if (LOCK_HELD.add(realPath.toString())) {
-        FileChannel ch = null;
-        try {
-          ch = FileChannel.open(realPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
-          try {
-            if (ch.tryLock() != null) {
-              channel = ch;
-              obtained = true;
-            }
-          } catch (IOException | OverlappingFileLockException e) {
-            // At least on OS X, we will sometimes get an
-            // intermittent "Permission Denied" IOException,
-            // which seems to simply mean "you failed to get
-            // the lock".  But other IOExceptions could be
-            // "permanent" (eg, locking is not supported via
-            // the filesystem).  So, we record the failure
-            // reason here; the timeout obtain (usually the
-            // one calling us) will use this as "root cause"
-            // if it fails to get the lock.
-            failureReason = e;
-          }
-        } finally {
-          if (obtained == false) { // not successful - clear up and move out
-            IOUtils.closeWhileHandlingException(ch);
-            clearLockHeld(realPath); // clear LOCK_HELD last
-          }
-        }
-      }
-      return obtained;
-    }
+    // Ensure that lockDir exists and is a directory.
+    // note: this will fail if lockDir is a symlink
+    Files.createDirectories(lockDir);
+
+    Path lockFile = lockDir.resolve(lockName);
+
+    try {
+      Files.createFile(lockFile);
+    } catch (IOException ignore) {
+      // we must create the file to have a truly canonical path.
+      // if it's already created, we don't care. if it cant be created, it will fail below.
+    }
+
+    // fails if the lock file does not exist
+    final Path realPath = lockFile.toRealPath();
+
+    // used as a best-effort check, to see if the underlying file has changed
+    final FileTime creationTime = Files.readAttributes(realPath, BasicFileAttributes.class).creationTime();
+
+    if (LOCK_HELD.add(realPath.toString())) {
+      FileChannel channel = null;
+      FileLock lock = null;
+      try {
+        channel = FileChannel.open(realPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
+        lock = channel.tryLock();
+        if (lock != null) {
+          return new NativeFSLock(lock, channel, realPath, creationTime);
+        } else {
+          throw new LockObtainFailedException("Lock held by another program: " + realPath);
+        }
+      } finally {
+        if (lock == null) { // not successful - clear up and move out
+          IOUtils.closeWhileHandlingException(channel); // TODO: addSuppressed
+          clearLockHeld(realPath); // clear LOCK_HELD last
+        }
+      }
+    } else {
+      throw new LockObtainFailedException("Lock held by this virtual machine: " + realPath);
+    }
+  }
+
+  private static final void clearLockHeld(Path path) throws IOException {
+    boolean remove = LOCK_HELD.remove(path.toString());
+    if (remove == false) {
+      throw new AlreadyClosedException("Lock path was cleared but never marked as held: " + path);
+    }
+  }
+
+  // TODO: kind of bogus we even pass channel:
+  // FileLock has an accessor, but mockfs doesnt yet mock the locks, too scary atm.
+
+  static final class NativeFSLock extends Lock {
+    final FileLock lock;
+    final FileChannel channel;
+    final Path path;
+    final FileTime creationTime;
+    volatile boolean closed;
+
+    NativeFSLock(FileLock lock, FileChannel channel, Path path, FileTime creationTime) {
+      this.lock = lock;
+      this.channel = channel;
+      this.path = path;
+      this.creationTime = creationTime;
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // check we are still in the locks map (some debugger or something crazy didn't remove us)
+      if (!LOCK_HELD.contains(path.toString())) {
+        throw new AlreadyClosedException("Lock path unexpectedly cleared from map: " + this);
+      }
+      // check our lock wasn't invalidated.
+      if (!lock.isValid()) {
+        throw new AlreadyClosedException("FileLock invalidated by an external force: " + this);
+      }
+      // try to validate the underlying file descriptor.
+      // this will throw IOException if something is wrong.
+      long size = channel.size();
+      if (size != 0) {
+        throw new AlreadyClosedException("Unexpected lock file size: " + size + ", (lock=" + this + ")");
+      }
+      // try to validate the backing file name, that it still exists,
+      // and has the same creation time as when we obtained the lock.
+      // if it differs, someone deleted our lock file (and we are ineffective)
+      FileTime ctime = Files.readAttributes(path, BasicFileAttributes.class).creationTime();
+      if (!creationTime.equals(ctime)) {
+        throw new AlreadyClosedException("Underlying file changed by an external force at " + creationTime + ", (lock=" + this + ")");
+      }
+    }

     @Override
     public synchronized void close() throws IOException {
-      if (channel != null) {
-        try {
-          IOUtils.close(channel);
-        } finally {
-          channel = null;
-          clearLockHeld(realPath); // clear LOCK_HELD last
-        }
-      }
-    }
-
-    private static final void clearLockHeld(Path path) {
-      boolean remove = LOCK_HELD.remove(path.toString());
-      assert remove : "Lock was cleared but never marked as held";
-    }
-
-    @Override
-    public synchronized boolean isLocked() {
-      // The test for is isLocked is not directly possible with native file locks:
-
-      // First a shortcut, if a lock reference in this instance is available
-      if (channel != null) {
-        return true;
-      }
-
-      // Look if lock file is definitely not present; if not, there can definitely be no lock!
-      if (Files.notExists(path)) {
-        return false;
-      }
-
-      // Try to obtain and release (if was locked) the lock
-      try {
-        boolean obtained = obtain();
-        if (obtained) close();
-        return !obtained;
-      } catch (IOException ioe) {
-        return false;
-      }
+      if (closed) {
+        return;
+      }
+      // NOTE: we don't validate, as unlike SimpleFSLockFactory, we can't break others locks
+      // first release the lock, then the channel
+      try (FileChannel channel = this.channel;
+           FileLock lock = this.lock) {
+        assert lock != null;
+        assert channel != null;
+      } finally {
+        closed = true;
+        clearLockHeld(path);
+      }
     }

     @Override
     public String toString() {
-      return "NativeFSLock@" + path;
+      return "NativeFSLock(path=" + path + ",impl=" + lock + ",ctime=" + creationTime + ")";
     }
   }
 }
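The factory-level LOCK_HELD set is what makes a second obtain inside the same JVM fail immediately, without depending on the JVM's per-process FileLock quirks described in the removed comment. A sketch (NativeFSLockFactory.INSTANCE is the default for FSDirectory.open; indexPath is an assumed Path):

    Directory dir = FSDirectory.open(indexPath);
    try (Lock first = dir.obtainLock("write.lock")) {
      try (Lock second = dir.obtainLock("write.lock")) {
        // never reached
      } catch (LockObtainFailedException e) {
        // "Lock held by this virtual machine: ..." -- no OS lock call was needed
      }
    }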
@@ -37,23 +37,17 @@ public final class NoLockFactory extends LockFactory {
   private NoLockFactory() {}

   @Override
-  public Lock makeLock(Directory dir, String lockName) {
+  public Lock obtainLock(Directory dir, String lockName) {
     return SINGLETON_LOCK;
   }

   private static class NoLock extends Lock {
     @Override
-    public boolean obtain() throws IOException {
-      return true;
-    }
-
-    @Override
     public void close() {
     }

     @Override
-    public boolean isLocked() {
-      return false;
+    public void ensureValid() throws IOException {
     }

     @Override
@@ -61,5 +55,4 @@ public final class NoLockFactory extends LockFactory {
       return "NoLock";
     }
   }
-
 }
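Under the new contract a no-op factory just hands back a singleton whose close() and ensureValid() do nothing. A sketch of opting out of locking when coordination happens elsewhere (the two-argument FSDirectory.open overload is assumed here; indexPath is an assumed Path):

    Directory dir = FSDirectory.open(indexPath, NoLockFactory.INSTANCE);
    try (Lock lock = dir.obtainLock("write.lock")) {
      // always "succeeds": the shared SINGLETON_LOCK is returned
    }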
@@ -17,26 +17,24 @@ package org.apache.lucene.store;
  * limitations under the License.
  */

-import java.io.File;
 import java.io.IOException;
+import java.nio.file.AccessDeniedException;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;

 /**
  * <p>Implements {@link LockFactory} using {@link
  * Files#createFile}.</p>
  *
- * <p><b>NOTE:</b> the {@linkplain File#createNewFile() javadocs
- * for <code>File.createNewFile()</code>} contain a vague
- * yet spooky warning about not using the API for file
- * locking.  This warning was added due to <a target="_top"
- * href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4676183">this
- * bug</a>, and in fact the only known problem with using
- * this API for locking is that the Lucene write lock may
- * not be released when the JVM exits abnormally.</p>
+ * <p>The main downside with using this API for locking is
+ * that the Lucene write lock may not be released when
+ * the JVM exits abnormally.</p>
  *
- * <p>When this happens, a {@link LockObtainFailedException}
- * is hit when trying to create a writer, in which case you
+ * <p>When this happens, an {@link LockObtainFailedException}
+ * is hit when trying to create a writer, in which case you may
  * need to explicitly clear the lock file first by
  * manually removing the file.  But, first be certain that
  * no writer is in fact writing to the index otherwise you
@@ -70,66 +68,83 @@ public final class SimpleFSLockFactory extends FSLockFactory {
   private SimpleFSLockFactory() {}

   @Override
-  protected Lock makeFSLock(FSDirectory dir, String lockName) {
-    return new SimpleFSLock(dir.getDirectory(), lockName);
-  }
-
-  static class SimpleFSLock extends Lock {
-
-    Path lockFile;
-    Path lockDir;
-    boolean obtained = false;
-
-    public SimpleFSLock(Path lockDir, String lockFileName) {
-      this.lockDir = lockDir;
-      lockFile = lockDir.resolve(lockFileName);
-    }
-
-    @Override
-    public synchronized boolean obtain() throws IOException {
-      if (obtained) {
-        // Our instance is already locked:
-        throw new LockObtainFailedException("this lock instance was already obtained");
-      }
-
-      try {
-        // Ensure that lockDir exists and is a directory.
-        // note: this will fail if lockDir is a symlink
-        Files.createDirectories(lockDir);
-        Files.createFile(lockFile);
-        obtained = true;
-      } catch (IOException ioe) {
-        // On Windows, on concurrent createNewFile, the 2nd process gets "access denied".
-        // In that case, the lock was not aquired successfully, so return false.
-        // We record the failure reason here; the obtain with timeout (usually the
-        // one calling us) will use this as "root cause" if it fails to get the lock.
-        failureReason = ioe;
-      }
-
-      return obtained;
-    }
-
-    @Override
-    public synchronized void close() throws LockReleaseFailedException {
-      // TODO: wierd that clearLock() throws the raw IOException...
-      if (obtained) {
-        try {
-          Files.deleteIfExists(lockFile);
-        } catch (Throwable cause) {
-          throw new LockReleaseFailedException("failed to delete " + lockFile, cause);
-        } finally {
-          obtained = false;
-        }
-      }
-    }
-
-    @Override
-    public boolean isLocked() {
-      return Files.exists(lockFile);
+  protected Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException {
+    Path lockDir = dir.getDirectory();
+
+    // Ensure that lockDir exists and is a directory.
+    // note: this will fail if lockDir is a symlink
+    Files.createDirectories(lockDir);
+
+    Path lockFile = lockDir.resolve(lockName);
+
+    // create the file: this will fail if it already exists
+    try {
+      Files.createFile(lockFile);
+    } catch (FileAlreadyExistsException | AccessDeniedException e) {
+      // convert optional specific exception to our optional specific exception
+      throw new LockObtainFailedException("Lock held elsewhere: " + lockFile, e);
+    }
+
+    // used as a best-effort check, to see if the underlying file has changed
+    final FileTime creationTime = Files.readAttributes(lockFile, BasicFileAttributes.class).creationTime();
+
+    return new SimpleFSLock(lockFile, creationTime);
+  }
+
+  static final class SimpleFSLock extends Lock {
+    private final Path path;
+    private final FileTime creationTime;
+    private volatile boolean closed;
+
+    SimpleFSLock(Path path, FileTime creationTime) throws IOException {
+      this.path = path;
+      this.creationTime = creationTime;
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // try to validate the backing file name, that it still exists,
+      // and has the same creation time as when we obtained the lock.
+      // if it differs, someone deleted our lock file (and we are ineffective)
+      FileTime ctime = Files.readAttributes(path, BasicFileAttributes.class).creationTime();
+      if (!creationTime.equals(ctime)) {
+        throw new AlreadyClosedException("Underlying file changed by an external force at " + creationTime + ", (lock=" + this + ")");
+      }
+    }
+
+    @Override
+    public synchronized void close() throws IOException {
+      if (closed) {
+        return;
+      }
+      try {
+        // NOTE: unlike NativeFSLockFactory, we can potentially delete someone else's
+        // lock if things have gone wrong. we do best-effort check (ensureValid) to
+        // avoid doing this.
+        try {
+          ensureValid();
+        } catch (Throwable exc) {
+          // notify the user they may need to intervene.
+          throw new LockReleaseFailedException("Lock file cannot be safely removed. Manual intervention is recommended.", exc);
+        }
+        // we did a best effort check, now try to remove the file. if something goes wrong,
// we need to make it clear to the user that the directory may still remain locked.
|
||||
try {
|
||||
Files.delete(path);
|
||||
} catch (Throwable exc) {
|
||||
throw new LockReleaseFailedException("Unable to remove lock file. Manual intervention is recommended", exc);
|
||||
}
|
||||
} finally {
|
||||
closed = true;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "SimpleFSLock@" + lockFile;
|
||||
return "SimpleFSLock(path=" + path + ",ctime=" + creationTime + ")";
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
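The rewritten SimpleFSLockFactory records the lock file's creation time when the lock is obtained and re-reads it in ensureValid(), so a lock file that was deleted or recreated behind the holder's back is detected instead of silently ignored. A sketch of how a caller leans on that check (the path is illustrative):

    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.SimpleFSLockFactory;

    public class SimpleFSLockExample {
      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"), SimpleFSLockFactory.INSTANCE);
             Lock lock = dir.obtainLock("write.lock")) {
          // Re-validate before each critical step: this throws
          // AlreadyClosedException if the lock file vanished or its
          // creation time no longer matches what we recorded at acquire.
          lock.ensureValid();
          // ... write safely ...
        }
      }
    }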
@@ -24,7 +24,7 @@ import java.util.HashSet;
  * Implements {@link LockFactory} for a single in-process instance,
  * meaning all locking will take place through this one instance.
  * Only use this {@link LockFactory} when you are certain all
- * IndexReaders and IndexWriters for a given index are running
+ * IndexWriters for a given index are running
  * against a single shared in-process Directory instance. This is
  * currently the default locking for RAMDirectory.
  *
@@ -33,51 +33,53 @@ import java.util.HashSet;
 
 public final class SingleInstanceLockFactory extends LockFactory {
 
-  private final HashSet<String> locks = new HashSet<>();
+  final HashSet<String> locks = new HashSet<>();
 
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
-    return new SingleInstanceLock(locks, lockName);
-  }
-
-  private static class SingleInstanceLock extends Lock {
-
-    private final String lockName;
-    private final HashSet<String> locks;
-    private boolean obtained = false;
-
-    public SingleInstanceLock(HashSet<String> locks, String lockName) {
-      this.locks = locks;
-      this.lockName = lockName;
-    }
-
-    @Override
-    public boolean obtain() throws IOException {
-      synchronized(locks) {
-        if (obtained) {
-          // Our instance is already locked:
-          throw new LockObtainFailedException("this lock instance was already obtained");
-        }
-        obtained = locks.add(lockName);
-
-        return obtained;
-      }
-    }
-
-    @Override
-    public void close() {
-      synchronized(locks) {
-        if (obtained) {
-          locks.remove(lockName);
-          obtained = false;
-        }
-      }
-    }
-
-    @Override
-    public boolean isLocked() {
-      synchronized(locks) {
-        return locks.contains(lockName);
-      }
-    }
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
+    synchronized (locks) {
+      if (locks.add(lockName)) {
+        return new SingleInstanceLock(lockName);
+      } else {
+        throw new LockObtainFailedException("lock instance already obtained: (dir=" + dir + ", lockName=" + lockName + ")");
+      }
+    }
+  }
+
+  private class SingleInstanceLock extends Lock {
+    private final String lockName;
+    private volatile boolean closed;
+
+    public SingleInstanceLock(String lockName) {
+      this.lockName = lockName;
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // check we are still in the locks map (some debugger or something crazy didn't remove us)
+      synchronized (locks) {
+        if (!locks.contains(lockName)) {
+          throw new AlreadyClosedException("Lock instance was invalidated from map: " + this);
+        }
+      }
+    }
+
+    @Override
+    public synchronized void close() throws IOException {
+      if (closed) {
+        return;
+      }
+      try {
+        synchronized (locks) {
+          if (!locks.remove(lockName)) {
+            throw new AlreadyClosedException("Lock was already released: " + this);
+          }
+        }
+      } finally {
+        closed = true;
+      }
+    }
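With the factory now doing the acquire itself, double-obtain errors surface at obtainLock() time rather than from a later obtain() call. A small sketch of the semantics (lock name is illustrative; RAMDirectory defaults to this factory, as the tests later in this commit assert):

    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;
    import org.apache.lucene.store.RAMDirectory;

    public class SingleInstanceExample {
      public static void main(String[] args) throws Exception {
        // Locking is scoped to this one Directory instance, not the JVM or filesystem.
        RAMDirectory dir = new RAMDirectory();
        Lock first = dir.obtainLock("write.lock");
        try {
          dir.obtainLock("write.lock"); // second acquire of the same name fails
        } catch (LockObtainFailedException expected) {
          System.out.println("already locked: " + expected.getMessage());
        }
        first.close();
        dir.obtainLock("write.lock").close(); // fine again after release
        dir.close();
      }
    }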
@@ -43,10 +43,23 @@ public final class VerifyingLockFactory extends LockFactory {
 
   private class CheckedLock extends Lock {
     private final Lock lock;
-    private boolean obtained = false;
 
-    public CheckedLock(Lock lock) {
+    public CheckedLock(Lock lock) throws IOException {
       this.lock = lock;
+      verify((byte) 1);
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      lock.ensureValid();
+    }
+
+    @Override
+    public void close() throws IOException {
+      try (Lock l = lock) {
+        l.ensureValid();
+        verify((byte) 0);
+      }
     }
 
     private void verify(byte message) throws IOException {
@@ -60,29 +73,6 @@ public final class VerifyingLockFactory extends LockFactory {
         throw new IOException("Protocol violation.");
       }
     }
-
-    @Override
-    public synchronized boolean obtain() throws IOException {
-      obtained = lock.obtain();
-      if (obtained) {
-        verify((byte) 1);
-      }
-      return obtained;
-    }
-
-    @Override
-    public synchronized boolean isLocked() throws IOException {
-      return lock.isLocked();
-    }
-
-    @Override
-    public synchronized void close() throws IOException {
-      if (obtained) {
-        assert isLocked();
-        verify((byte) 0);
-      }
-      lock.close();
-    }
   }
 
   /**
@@ -97,7 +87,7 @@ public final class VerifyingLockFactory extends LockFactory {
   }
 
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
-    return new CheckedLock(lf.makeLock(dir, lockName));
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
+    return new CheckedLock(lf.obtainLock(dir, lockName));
   }
 }
@@ -1268,7 +1268,6 @@ public class TestAddIndexes extends LuceneTestCase {
     Directory dest = newDirectory();
 
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
-    iwc.setWriteLockTimeout(1);
     RandomIndexWriter w2 = new RandomIndexWriter(random(), dest, iwc);
 
     try {
@@ -96,14 +96,7 @@ public class TestIndexWriter extends LuceneTestCase {
     IndexReader reader = null;
     int i;
 
-    long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
-    try {
-      IndexWriterConfig.setDefaultWriteLockTimeout(2000);
-      assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
-      writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    } finally {
-      IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
-    }
+    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 
     // add 100 documents
     for (i = 0; i < 100; i++) {
@@ -1725,8 +1718,7 @@ public class TestIndexWriter extends LuceneTestCase {
     RandomIndexWriter w1 = new RandomIndexWriter(random(), d);
     w1.deleteAll();
     try {
-      new RandomIndexWriter(random(), d, newIndexWriterConfig(null)
-                                             .setWriteLockTimeout(100));
+      new RandomIndexWriter(random(), d, newIndexWriterConfig(null));
       fail("should not be able to create another writer");
     } catch (LockObtainFailedException lofe) {
       // expected
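The tests above stop configuring write-lock timeouts because obtainLock() is fail-fast. Code that still wants blocking-with-timeout semantics can wrap the Directory, for example with the SleepingLockWrapper exercised later in this commit (timeout values and the path here are illustrative):

    import java.nio.file.Paths;

    import org.apache.lucene.index.SleepingLockWrapper;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;

    public class SleepingLockExample {
      public static void main(String[] args) throws Exception {
        Directory raw = FSDirectory.open(Paths.get("/tmp/index"));
        // Retry obtainLock() for up to 5 seconds, polling every 100 ms,
        // before giving up with LockObtainFailedException:
        Directory dir = new SleepingLockWrapper(raw, 5000, 100);
        try (Lock lock = dir.obtainLock("write.lock")) {
          // ... locked work ...
        } finally {
          dir.close();
        }
      }
    }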
@@ -2199,7 +2199,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
 
     // even though we hit exception: we are closed, no locks or files held, index in good state
     assertTrue(iw.isClosed());
-    assertFalse(IndexWriter.isLocked(dir));
+    dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
 
     r = DirectoryReader.open(dir);
     assertEquals(10, r.maxDoc());
@@ -2268,7 +2268,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
 
     // even though we hit exception: we are closed, no locks or files held, index in good state
     assertTrue(iw.isClosed());
-    assertFalse(IndexWriter.isLocked(dir));
+    dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
 
     r = DirectoryReader.open(dir);
     assertEquals(10, r.maxDoc());
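IndexWriter.isLocked() is gone; as the hunks above show, "not locked" is now asserted by briefly acquiring and releasing the write lock. The same (inherently racy) trick works in application code, a sketch:

    import java.nio.file.Paths;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.LockObtainFailedException;

    public class IsLockedCheck {
      // Best-effort check; the answer can change the instant this returns.
      static boolean isLocked(Directory dir) throws Exception {
        try {
          dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
          return false; // we could take and release it, so nobody held it
        } catch (LockObtainFailedException e) {
          return true;
        }
      }

      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"))) {
          System.out.println("locked: " + isLocked(dir));
        }
      }
    }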
@@ -0,0 +1,49 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.index.SleepingLockWrapper;
+import org.apache.lucene.store.BaseLockFactoryTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util.TestUtil;
+
+/** Simple tests for SleepingLockWrapper */
+public class TestSleepingLockWrapper extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    long lockWaitTimeout = TestUtil.nextLong(random(), 20, 100);
+    long pollInterval = TestUtil.nextLong(random(), 2, 10);
+
+    int which = random().nextInt(3);
+    switch (which) {
+      case 0:
+        return new SleepingLockWrapper(newDirectory(random(), new SingleInstanceLockFactory()), lockWaitTimeout, pollInterval);
+      case 1:
+        return new SleepingLockWrapper(newFSDirectory(path), lockWaitTimeout, pollInterval);
+      default:
+        return new SleepingLockWrapper(newFSDirectory(path), lockWaitTimeout, pollInterval);
+    }
+  }
+
+  // TODO: specific tests to this impl
+}
@@ -86,14 +86,12 @@ public class TestDirectory extends LuceneTestCase {
       assertFalse(slowFileExists(d2, fname));
     }
 
-    Lock lock = dir.makeLock(lockname);
-    assertTrue(lock.obtain());
+    Lock lock = dir.obtainLock(lockname);
 
-    for (int j=0; j<dirs.length; j++) {
-      FSDirectory d2 = dirs[j];
-      Lock lock2 = d2.makeLock(lockname);
+    for (Directory other : dirs) {
       try {
-        assertFalse(lock2.obtain(1));
+        other.obtainLock(lockname);
+        fail("didnt get exception");
       } catch (LockObtainFailedException e) {
         // OK
       }
@@ -102,8 +100,7 @@ public class TestDirectory extends LuceneTestCase {
     lock.close();
 
     // now lock with different dir
-    lock = dirs[(i+1)%dirs.length].makeLock(lockname);
-    assertTrue(lock.obtain());
+    lock = dirs[(i+1)%dirs.length].obtainLock(lockname);
     lock.close();
   }
 
@@ -1,168 +0,0 @@
-package org.apache.lucene.store;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-import java.io.IOException;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestLock extends LuceneTestCase {
-
-  public void testObtain() {
-    LockMock lock = new LockMock();
-    Lock.LOCK_POLL_INTERVAL = 10;
-
-    try {
-      lock.obtain(Lock.LOCK_POLL_INTERVAL);
-      fail("Should have failed to obtain lock");
-    } catch (IOException e) {
-      assertEquals("should attempt to lock more than once", lock.lockAttempts, 2);
-    }
-  }
-
-  private class LockMock extends Lock {
-    public int lockAttempts;
-
-    @Override
-    public boolean obtain() {
-      lockAttempts++;
-      return false;
-    }
-    @Override
-    public void close() {
-      // do nothing
-    }
-    @Override
-    public boolean isLocked() {
-      return false;
-    }
-  }
-
-  public void testObtainConcurrently() throws InterruptedException, IOException {
-    final Directory directory;
-    if (random().nextBoolean()) {
-      directory = newDirectory();
-    } else {
-      LockFactory lf = random().nextBoolean() ? SimpleFSLockFactory.INSTANCE : NativeFSLockFactory.INSTANCE;
-      directory = newFSDirectory(createTempDir(), lf);
-    }
-    final AtomicBoolean running = new AtomicBoolean(true);
-    final AtomicInteger atomicCounter = new AtomicInteger(0);
-    final ReentrantLock assertingLock = new ReentrantLock();
-    int numThreads = 2 + random().nextInt(10);
-    final int runs = 500 + random().nextInt(1000);
-    CyclicBarrier barrier = new CyclicBarrier(numThreads);
-    Thread[] threads = new Thread[numThreads];
-    for (int i = 0; i < threads.length; i++) {
-      threads[i] = new Thread() {
-        @Override
-        public void run() {
-          try {
-            barrier.await();
-          } catch (Exception e) {
-            throw new RuntimeException(e);
-          }
-          while (running.get()) {
-            try (Lock lock = directory.makeLock("foo.lock")) {
-              if (lock.isLocked() == false && lock.obtain()) {
-                assertTrue(lock.isLocked());
-                assertFalse(assertingLock.isLocked());
-                if (assertingLock.tryLock()) {
-                  assertingLock.unlock();
-                } else {
-                  fail();
-                }
-              }
-            } catch (IOException ex) {
-              //
-            }
-            if (atomicCounter.incrementAndGet() > runs) {
-              running.set(false);
-            }
-          }
-        }
-      };
-      threads[i].start();
-    }
-
-    for (int i = 0; i < threads.length; i++) {
-      threads[i].join();
-    }
-    directory.close();
-  }
-
-  public void testSingleInstanceLockFactoryDoubleObtain() throws Exception {
-    LockFactory lf = new SingleInstanceLockFactory();
-    Directory dir = newFSDirectory(createTempDir(), lf);
-    Lock lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    try {
-      lock.obtain();
-      fail("did not hit double-obtain failure");
-    } catch (LockObtainFailedException lofe) {
-      // expected
-    }
-    lock.close();
-
-    lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    lock.close();
-    dir.close();
-  }
-
-  public void testSimpleFSLockFactoryDoubleObtain() throws Exception {
-    Directory dir = newFSDirectory(createTempDir(), SimpleFSLockFactory.INSTANCE);
-    Lock lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    try {
-      lock.obtain();
-      fail("did not hit double-obtain failure");
-    } catch (LockObtainFailedException lofe) {
-      // expected
-    }
-    lock.close();
-
-    lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    lock.close();
-    dir.close();
-  }
-
-  public void testNativeFSLockFactoryDoubleObtain() throws Exception {
-    Directory dir = newFSDirectory(createTempDir(), NativeFSLockFactory.INSTANCE);
-    Lock lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    try {
-      lock.obtain();
-      fail("did not hit double-obtain failure");
-    } catch (LockObtainFailedException lofe) {
-      // expected
-    }
-    lock.close();
-
-    lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    lock.close();
-    dir.close();
-  }
-}
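TestLock disappears because the polling obtain(timeout) contract it exercised no longer exists. If an application needs equivalent behavior without pulling in SleepingLockWrapper, a manual retry loop over the new fail-fast API is straightforward (a sketch; names and intervals are illustrative):

    import java.io.IOException;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    public final class LockRetry {
      /** Try obtainLock() until it succeeds or timeoutMillis elapses. */
      public static Lock obtainWithTimeout(Directory dir, String name,
                                           long timeoutMillis, long pollMillis)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
          try {
            return dir.obtainLock(name); // fail-fast acquire
          } catch (LockObtainFailedException e) {
            if (System.currentTimeMillis() >= deadline) {
              throw e; // give up, propagate the root cause
            }
            Thread.sleep(pollMillis);
          }
        }
      }
    }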
@@ -18,8 +18,6 @@ package org.apache.lucene.store;
  */
 
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -27,16 +25,9 @@ import java.util.Map;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestLockFactory extends LuceneTestCase {
@@ -58,15 +49,6 @@ public class TestLockFactory extends LuceneTestCase {
     // Both write lock and commit lock should have been created:
     assertEquals("# of unique locks created (after instantiating IndexWriter)",
                  1, lf.locksCreated.size());
-    assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
-               lf.makeLockCount >= 1);
-
-    for(final String lockName : lf.locksCreated.keySet()) {
-      MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
-      assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
-                 lock.lockAttempts > 0);
-    }
-
     writer.close();
   }
 
@@ -75,7 +57,6 @@ public class TestLockFactory extends LuceneTestCase {
   // Verify: NoLockFactory allows two IndexWriters
   public void testRAMDirectoryNoLocking() throws IOException {
     MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(NoLockFactory.INSTANCE));
-    dir.setAssertLocks(false); // we are gonna explicitly test we get this back
 
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
     writer.commit(); // required so the second open succeed
@@ -95,240 +76,29 @@ public class TestLockFactory extends LuceneTestCase {
     }
   }
 
-  // Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
-  // Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
-  public void testDefaultRAMDirectory() throws IOException {
-    RAMDirectory dir = new RAMDirectory();
-
-    assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.lockFactory,
-               dir.lockFactory instanceof SingleInstanceLockFactory);
-
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-
-    // Create a 2nd IndexWriter.  This should fail:
-    IndexWriter writer2 = null;
-    try {
-      writer2 = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-      fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
-    } catch (IOException e) {
-    }
-
-    writer.close();
-    if (writer2 != null) {
-      writer2.close();
-    }
-  }
-
-  // Verify: do stress test, by opening IndexReaders and
-  // IndexWriters over & over in 2 threads and making sure
-  // no unexpected exceptions are raised:
-  @Nightly
-  public void testStressLocksSimpleFSLockFactory() throws Exception {
-    _testStressLocks(SimpleFSLockFactory.INSTANCE, createTempDir("index.TestLockFactory6"));
-  }
-
-  // Verify: do stress test, by opening IndexReaders and
-  // IndexWriters over & over in 2 threads and making sure
-  // no unexpected exceptions are raised, but use
-  // NativeFSLockFactory:
-  @Nightly
-  public void testStressLocksNativeFSLockFactory() throws Exception {
-    Path dir = createTempDir("index.TestLockFactory7");
-    _testStressLocks(NativeFSLockFactory.INSTANCE, dir);
-  }
-
-  public void _testStressLocks(LockFactory lockFactory, Path indexDir) throws Exception {
-    Directory dir = newFSDirectory(indexDir, lockFactory);
-
-    // First create a 1 doc index:
-    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
-    addDoc(w);
-    w.close();
-
-    WriterThread writer = new WriterThread(100, dir);
-    SearcherThread searcher = new SearcherThread(100, dir);
-    writer.start();
-    searcher.start();
-
-    while(writer.isAlive() || searcher.isAlive()) {
-      Thread.sleep(1000);
-    }
-
-    assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
-    assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
-
-    dir.close();
-    // Cleanup
-    IOUtils.rm(indexDir);
-  }
-
-  // Verify: NativeFSLockFactory works correctly
-  public void testNativeFSLockFactory() throws IOException {
-    Directory dir = FSDirectory.open(createTempDir(LuceneTestCase.getTestClass().getSimpleName()), NativeFSLockFactory.INSTANCE);
-
-    Lock l = dir.makeLock("commit");
-    Lock l2 = dir.makeLock("commit");
-
-    assertTrue("failed to obtain lock", l.obtain());
-    assertTrue("succeeded in obtaining lock twice", !l2.obtain());
-    l.close();
-
-    assertTrue("failed to obtain 2nd lock after first one was freed", l2.obtain());
-    l2.close();
-
-    // Make sure we can obtain first one again, test isLocked():
-    assertTrue("failed to obtain lock", l.obtain());
-    assertTrue(l.isLocked());
-    assertTrue(l2.isLocked());
-    l.close();
-    assertFalse(l.isLocked());
-    assertFalse(l2.isLocked());
-  }
-
-  // Verify: NativeFSLockFactory works correctly if the lock file exists
-  public void testNativeFSLockFactoryLockExists() throws IOException {
-    Path tempDir = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
-    Path lockFile = tempDir.resolve("test.lock");
-    Files.createFile(lockFile);
-
-    Directory dir = FSDirectory.open(tempDir, NativeFSLockFactory.INSTANCE);
-    Lock l = dir.makeLock("test.lock");
-    assertTrue("failed to obtain lock", l.obtain());
-    l.close();
-    assertFalse("failed to release lock", l.isLocked());
-    Files.deleteIfExists(lockFile);
-  }
-
-  private class WriterThread extends Thread {
-    private Directory dir;
-    private int numIteration;
-    public boolean hitException = false;
-    public WriterThread(int numIteration, Directory dir) {
-      this.numIteration = numIteration;
-      this.dir = dir;
-    }
-    @Override
-    public void run() {
-      IndexWriter writer = null;
-      for(int i=0;i<this.numIteration;i++) {
-        try {
-          writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-        } catch (IOException e) {
-          if (e.toString().indexOf(" timed out:") == -1) {
-            hitException = true;
-            System.out.println("Stress Test Index Writer: creation hit unexpected IOException: " + e.toString());
-            e.printStackTrace(System.out);
-          } else {
-            // lock obtain timed out
-            // NOTE: we should at some point
-            // consider this a failure?  The lock
-            // obtains, across IndexReader &
-            // IndexWriters should be "fair" (ie
-            // FIFO).
-          }
-        } catch (Exception e) {
-          hitException = true;
-          System.out.println("Stress Test Index Writer: creation hit unexpected exception: " + e.toString());
-          e.printStackTrace(System.out);
-          break;
-        }
-        if (writer != null) {
-          try {
-            addDoc(writer);
-          } catch (IOException e) {
-            hitException = true;
-            System.out.println("Stress Test Index Writer: addDoc hit unexpected exception: " + e.toString());
-            e.printStackTrace(System.out);
-            break;
-          }
-          try {
-            writer.close();
-          } catch (IOException e) {
-            hitException = true;
-            System.out.println("Stress Test Index Writer: close hit unexpected exception: " + e.toString());
-            e.printStackTrace(System.out);
-            break;
-          }
-          writer = null;
-        }
-      }
-    }
-  }
-
-  private class SearcherThread extends Thread {
-    private Directory dir;
-    private int numIteration;
-    public boolean hitException = false;
-    public SearcherThread(int numIteration, Directory dir) {
-      this.numIteration = numIteration;
-      this.dir = dir;
-    }
-    @Override
-    public void run() {
-      IndexReader reader = null;
-      IndexSearcher searcher = null;
-      Query query = new TermQuery(new Term("content", "aaa"));
-      for(int i=0;i<this.numIteration;i++) {
-        try{
-          reader = DirectoryReader.open(dir);
-          searcher = newSearcher(reader);
-        } catch (Exception e) {
-          hitException = true;
-          System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
-          e.printStackTrace(System.out);
-          break;
-        }
-        try {
-          searcher.search(query, 1000);
-        } catch (IOException e) {
-          hitException = true;
-          System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
-          e.printStackTrace(System.out);
-          break;
-        }
-        // System.out.println(hits.length() + " total results");
-        try {
-          reader.close();
-        } catch (IOException e) {
-          hitException = true;
-          System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
-          e.printStackTrace(System.out);
-          break;
-        }
-      }
-    }
-  }
-
   class MockLockFactory extends LockFactory {
 
     public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
-    public int makeLockCount = 0;
 
     @Override
-    public synchronized Lock makeLock(Directory dir, String lockName) {
+    public synchronized Lock obtainLock(Directory dir, String lockName) {
       Lock lock = new MockLock();
       locksCreated.put(lockName, lock);
-      makeLockCount++;
       return lock;
     }
 
     public class MockLock extends Lock {
-      public int lockAttempts;
 
       @Override
-      public boolean obtain() {
-        lockAttempts++;
-        return true;
-      }
-      @Override
       public void close() {
         // do nothing
       }
 
       @Override
-      public boolean isLocked() {
-        return false;
+      public void ensureValid() throws IOException {
+        // do nothing
       }
 
     }
   }
 }
@@ -0,0 +1,108 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.lucene.util.IOUtils;
+
+/** Simple tests for NativeFSLockFactory */
+public class TestNativeFSLockFactory extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newFSDirectory(path, NativeFSLockFactory.INSTANCE);
+  }
+
+  /** Verify NativeFSLockFactory works correctly if the lock file exists */
+  public void testLockFileExists() throws IOException {
+    Path tempDir = createTempDir();
+    Path lockFile = tempDir.resolve("test.lock");
+    Files.createFile(lockFile);
+
+    Directory dir = getDirectory(tempDir);
+    Lock l = dir.obtainLock("test.lock");
+    l.close();
+    dir.close();
+  }
+
+  /** release the lock and test ensureValid fails */
+  public void testInvalidateLock() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    NativeFSLockFactory.NativeFSLock lock = (NativeFSLockFactory.NativeFSLock) dir.obtainLock("test.lock");
+    lock.ensureValid();
+    lock.lock.release();
+    try {
+      lock.ensureValid();
+      fail("no exception");
+    } catch (AlreadyClosedException expected) {
+      // ok
+    } finally {
+      IOUtils.closeWhileHandlingException(lock);
+    }
+    dir.close();
+  }
+
+  /** close the channel and test ensureValid fails */
+  public void testInvalidateChannel() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    NativeFSLockFactory.NativeFSLock lock = (NativeFSLockFactory.NativeFSLock) dir.obtainLock("test.lock");
+    lock.ensureValid();
+    lock.channel.close();
+    try {
+      lock.ensureValid();
+      fail("no exception");
+    } catch (AlreadyClosedException expected) {
+      // ok
+    } finally {
+      IOUtils.closeWhileHandlingException(lock);
+    }
+    dir.close();
+  }
+
+  /** delete the lockfile and test ensureValid fails */
+  public void testDeleteLockFile() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    try {
+      Lock lock = dir.obtainLock("test.lock");
+      lock.ensureValid();
+
+      try {
+        dir.deleteFile("test.lock");
+      } catch (Exception e) {
+        // we can't delete a file for some reason, just clean up and assume the test.
+        IOUtils.closeWhileHandlingException(lock);
+        assumeNoException("test requires the ability to delete a locked file", e);
+      }
+
+      try {
+        lock.ensureValid();
+        fail("no exception");
+      } catch (IOException expected) {
+        // ok
+      } finally {
+        IOUtils.closeWhileHandlingException(lock);
+      }
+    } finally {
+      // Do this in finally clause in case the assumeNoException is false:
+      dir.close();
+    }
+  }
+}
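These tests pin down the failure mode callers should expect with native locks: if the lock file is deleted, or the underlying FileLock or channel is lost, ensureValid() throws and the holder must stop treating itself as the index owner. A sketch of that defensive pattern (the path is illustrative; FSDirectory defaults to NativeFSLockFactory):

    import java.nio.file.Paths;

    import org.apache.lucene.store.AlreadyClosedException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;

    public class NativeLockValidity {
      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
             Lock lock = dir.obtainLock("write.lock")) {
          try {
            lock.ensureValid();
            // still the rightful owner; safe to proceed
          } catch (AlreadyClosedException lost) {
            // lock file deleted or native lock lost: abort writing
            throw lost;
          }
        }
      }
    }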
@@ -0,0 +1,61 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.util.IOUtils;
+
+/** Simple tests for SimpleFSLockFactory */
+public class TestSimpleFSLockFactory extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newFSDirectory(path, SimpleFSLockFactory.INSTANCE);
+  }
+
+  /** delete the lockfile and test ensureValid fails */
+  public void testDeleteLockFile() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    try {
+      Lock lock = dir.obtainLock("test.lock");
+      lock.ensureValid();
+
+      try {
+        dir.deleteFile("test.lock");
+      } catch (Exception e) {
+        // we can't delete a file for some reason, just clean up and assume the test.
+        IOUtils.closeWhileHandlingException(lock);
+        assumeNoException("test requires the ability to delete a locked file", e);
+      }
+
+      try {
+        lock.ensureValid();
+        fail("no exception");
+      } catch (IOException expected) {
+        // ok
+      } finally {
+        IOUtils.closeWhileHandlingException(lock);
+      }
+    } finally {
+      // Do this in finally clause in case the assumeNoException is false:
+      dir.close();
+    }
+  }
+}
@@ -0,0 +1,59 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+/** Simple tests for SingleInstanceLockFactory */
+public class TestSingleInstanceLockFactory extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newDirectory(random(), new SingleInstanceLockFactory());
+  }
+
+  // Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
+  // Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
+  public void testDefaultRAMDirectory() throws IOException {
+    RAMDirectory dir = new RAMDirectory();
+
+    assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.lockFactory,
+               dir.lockFactory instanceof SingleInstanceLockFactory);
+
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
+
+    // Create a 2nd IndexWriter.  This should fail:
+    IndexWriter writer2 = null;
+    try {
+      writer2 = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+      fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
+    } catch (IOException e) {
+    }
+
+    writer.close();
+    if (writer2 != null) {
+      writer2.close();
+    }
+  }
+}
@@ -43,7 +43,7 @@ import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockObtainFailedException; // javadocs
+import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.BytesRef;
 
 /*
@@ -320,7 +320,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTestCase {
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
     try {
-      cfs.makeLock("foobar");
+      cfs.obtainLock("foobar");
       fail("didn't get expected exception");
     } catch (UnsupportedOperationException expected) {
       // expected UOE
@@ -0,0 +1,275 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+
+/** Base class for per-LockFactory tests. */
+public abstract class BaseLockFactoryTestCase extends LuceneTestCase {
+
+  /** Subclass returns the Directory to be tested; if it's
+   *  an FS-based directory it should point to the specified
+   *  path, else it can ignore it. */
+  protected abstract Directory getDirectory(Path path) throws IOException;
+
+  /** Test obtaining and releasing locks, checking validity */
+  public void testBasics() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+
+    Lock l = dir.obtainLock("commit");
+    try {
+      dir.obtainLock("commit");
+      fail("succeeded in obtaining lock twice, didn't get exception");
+    } catch (LockObtainFailedException expected) {}
+    l.close();
+
+    // Make sure we can obtain first one again:
+    l = dir.obtainLock("commit");
+    l.close();
+
+    dir.close();
+  }
+
+  /** Test closing locks twice */
+  public void testDoubleClose() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+
+    Lock l = dir.obtainLock("commit");
+    l.close();
+    l.close(); // close again, should be no exception
+
+    dir.close();
+  }
+
+  /** Test ensureValid returns true after acquire */
+  public void testValidAfterAcquire() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+
+    Lock l = dir.obtainLock("commit");
+    l.ensureValid(); // no exception
+    l.close();
+
+    dir.close();
+  }
+
+  /** Test ensureValid throws exception after close */
+  public void testInvalidAfterClose() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+
+    Lock l = dir.obtainLock("commit");
+    l.close();
+
+    try {
+      l.ensureValid();
+      fail("didn't get exception");
+    } catch (AlreadyClosedException expected) {}
+
+    dir.close();
+  }
+
+  public void testObtainConcurrently() throws InterruptedException, IOException {
+    final Directory directory = getDirectory(createTempDir());
+    final AtomicBoolean running = new AtomicBoolean(true);
+    final AtomicInteger atomicCounter = new AtomicInteger(0);
+    final ReentrantLock assertingLock = new ReentrantLock();
+    int numThreads = 2 + random().nextInt(10);
+    final int runs = atLeast(10000);
+    CyclicBarrier barrier = new CyclicBarrier(numThreads);
+    Thread[] threads = new Thread[numThreads];
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+          while (running.get()) {
+            try (Lock lock = directory.obtainLock("foo.lock")) {
+              assertFalse(assertingLock.isLocked());
+              if (assertingLock.tryLock()) {
+                assertingLock.unlock();
+              } else {
+                fail();
+              }
+              assert lock != null; // stupid compiler
+            } catch (IOException ex) {
+              //
+            }
+            if (atomicCounter.incrementAndGet() > runs) {
+              running.set(false);
+            }
+          }
+        }
+      };
+      threads[i].start();
+    }
+
+    for (int i = 0; i < threads.length; i++) {
+      threads[i].join();
+    }
+    directory.close();
+  }
+
+  // Verify: do stress test, by opening IndexReaders and
+  // IndexWriters over & over in 2 threads and making sure
+  // no unexpected exceptions are raised:
+  public void testStressLocks() throws Exception {
+    Directory dir = getDirectory(createTempDir());
+
+    // First create a 1 doc index:
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
+    addDoc(w);
+    w.close();
+
+    WriterThread writer = new WriterThread(100, dir);
+    SearcherThread searcher = new SearcherThread(100, dir);
+    writer.start();
+    searcher.start();
+
+    while(writer.isAlive() || searcher.isAlive()) {
+      Thread.sleep(1000);
+    }
+
+    assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
+    assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
+
+    dir.close();
+  }
+
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newTextField("content", "aaa", Field.Store.NO));
+    writer.addDocument(doc);
+  }
+
+  private class WriterThread extends Thread {
+    private Directory dir;
+    private int numIteration;
+    public boolean hitException = false;
+    public WriterThread(int numIteration, Directory dir) {
+      this.numIteration = numIteration;
+      this.dir = dir;
+    }
+    @Override
+    public void run() {
+      IndexWriter writer = null;
+      for(int i=0;i<this.numIteration;i++) {
+        try {
+          writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+        } catch (LockObtainFailedException e) {
+          // lock obtain timed out
+          // NOTE: we should at some point
+          // consider this a failure?  The lock
+          // obtains, across IndexReader &
+          // IndexWriters should be "fair" (ie
+          // FIFO).
+        } catch (Exception e) {
+          hitException = true;
+          System.out.println("Stress Test Index Writer: creation hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        if (writer != null) {
+          try {
+            addDoc(writer);
+          } catch (IOException e) {
+            hitException = true;
+            System.out.println("Stress Test Index Writer: addDoc hit unexpected exception: " + e.toString());
+            e.printStackTrace(System.out);
+            break;
+          }
+          try {
+            writer.close();
+          } catch (IOException e) {
+            hitException = true;
+            System.out.println("Stress Test Index Writer: close hit unexpected exception: " + e.toString());
+            e.printStackTrace(System.out);
+            break;
+          }
+          writer = null;
+        }
+      }
+    }
+  }
+
+  private class SearcherThread extends Thread {
+    private Directory dir;
+    private int numIteration;
+    public boolean hitException = false;
+    public SearcherThread(int numIteration, Directory dir) {
+      this.numIteration = numIteration;
+      this.dir = dir;
+    }
+    @Override
+    public void run() {
+      IndexReader reader = null;
+      IndexSearcher searcher = null;
+      Query query = new TermQuery(new Term("content", "aaa"));
+      for(int i=0;i<this.numIteration;i++) {
+        try{
+          reader = DirectoryReader.open(dir);
+          searcher = newSearcher(reader);
+        } catch (Exception e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        try {
+          searcher.search(query, 1000);
+        } catch (IOException e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        // System.out.println(hits.length() + " total results");
+        try {
+          reader.close();
+        } catch (IOException e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+      }
+    }
+  }
+
+}
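Because the whole suite now lives in this base class, covering a custom LockFactory only takes a getDirectory() override. A hypothetical sketch (DelegatingLockFactory is a made-up stand-in, not part of this commit):

    import java.io.IOException;
    import java.nio.file.Path;

    import org.apache.lucene.store.BaseLockFactoryTestCase;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockFactory;
    import org.apache.lucene.store.SingleInstanceLockFactory;

    public class TestDelegatingLockFactory extends BaseLockFactoryTestCase {

      // Trivial factory that forwards to SingleInstanceLockFactory; it stands
      // in for whatever custom factory you want covered by the base suite.
      static final class DelegatingLockFactory extends LockFactory {
        private final LockFactory delegate = new SingleInstanceLockFactory();
        @Override
        public Lock obtainLock(Directory dir, String lockName) throws IOException {
          return delegate.obtainLock(dir, lockName);
        }
      }

      @Override
      protected Directory getDirectory(Path path) throws IOException {
        return newDirectory(random(), new DelegatingLockFactory());
      }
    }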
@@ -76,7 +76,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
   boolean assertNoDeleteOpenFile = false;
   boolean preventDoubleWrite = true;
   boolean trackDiskUsage = false;
-  boolean wrapLocking = true;
   boolean useSlowOpenClosers = LuceneTestCase.TEST_NIGHTLY;
   boolean enableVirusScanner = true;
   boolean allowRandomFileNotFoundException = true;
@@ -702,19 +701,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
     assertNoUnreferencedFilesOnClose = v;
   }
 
-  /**
-   * Set to false if you want to return the pure {@link LockFactory} and not
-   * wrap all lock with {@code AssertingLock}.
-   * <p>
-   * Be careful if you turn this off: {@code MockDirectoryWrapper} might
-   * no longer be able to detect if you forget to close an {@link IndexWriter},
-   * and spit out horribly scary confusing exceptions instead of
-   * simply telling you that.
-   */
-  public void setAssertLocks(boolean v) {
-    this.wrapLocking = v;
-  }
-
   @Override
   public synchronized void close() throws IOException {
     if (isOpen) {
@@ -994,60 +980,10 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
   }
 
   @Override
-  public synchronized Lock makeLock(String name) {
+  public synchronized Lock obtainLock(String name) throws IOException {
     maybeYield();
-    if (wrapLocking) {
-      return new AssertingLock(super.makeLock(name), name);
-    } else {
-      return super.makeLock(name);
-    }
-  }
-
-  private final class AssertingLock extends Lock {
-    private final Lock delegateLock;
-    private final String name;
-    private boolean obtained = false;
-
-    AssertingLock(Lock delegate, String name) {
-      this.delegateLock = delegate;
-      this.name = name;
-    }
-
-    @Override
-    public boolean obtain() throws IOException {
-      if (delegateLock.obtain()) {
-        final RuntimeException exception = openLocks.putIfAbsent(name, new RuntimeException("lock \"" + name + "\" was not released: " + delegateLock));
-        if (exception != null && delegateLock != NoLockFactory.SINGLETON_LOCK) {
-          throw exception;
-        }
-        obtained = true;
-      } else {
-        obtained = false;
-      }
-
-      return obtained;
-    }
-
-    @Override
-    public void close() throws IOException {
-      if (obtained) {
-        RuntimeException remove = openLocks.remove(name);
-        // TODO: fix stupid tests like TestIndexWriter.testNoSegmentFile to not do this!
-        assert remove != null || delegateLock == NoLockFactory.SINGLETON_LOCK;
-        obtained = false;
-      }
-      delegateLock.close();
-    }
-
-    @Override
-    public boolean isLocked() throws IOException {
-      return delegateLock.isLocked();
-    }
-
-    @Override
-    public String toString() {
-      return "AssertingLock(" + delegateLock + ")";
-    }
+    return super.obtainLock(name);
+    // TODO: consider mocking locks, but not all the time, can hide bugs
   }
 
   /** Use this when throwing fake {@code IOException},
@@ -271,7 +271,7 @@ public final class TestUtil {
     ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
     // TODO: actually use the dir's locking, unless test uses a special method?
     // some tests e.g. exception tests become much more complicated if they have to close the writer
-    try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.makeLock(dir, "bogus"))) {
+    try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.obtainLock(dir, "bogus"))) {
      checker.setCrossCheckTermVectors(crossCheckTermVectors);
      checker.setFailFast(failFast);
      checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);
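Outside of tests, the no-arg CheckIndex(Directory) constructor now takes the real write lock via obtainLock(), so running it against an index that has a live writer fails fast instead of silently racing. A sketch of plain usage (the path is illustrative):

    import java.nio.file.Paths;

    import org.apache.lucene.index.CheckIndex;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class CheckIndexExample {
      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
             CheckIndex checker = new CheckIndex(dir)) { // obtains write.lock or throws
          CheckIndex.Status status = checker.checkIndex();
          System.out.println(status.clean ? "index is clean" : "index has problems");
        }
      }
    }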
@@ -47,32 +47,6 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase {
     super.testThreadSafety();
   }
 
-  public void testFailIfIndexWriterNotClosed() throws IOException {
-    MockDirectoryWrapper dir = newMockDirectory();
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    try {
-      dir.close();
-      fail();
-    } catch (Exception expected) {
-      assertTrue(expected.getMessage().contains("there are still open locks"));
-    } finally {
-      IOUtils.closeWhileHandlingException(iw);
-    }
-  }
-
-  public void testFailIfIndexWriterNotClosedChangeLockFactory() throws IOException {
-    MockDirectoryWrapper dir = newMockDirectory(random(), new SingleInstanceLockFactory());
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    try {
-      dir.close();
-      fail();
-    } catch (Exception expected) {
-      assertTrue(expected.getMessage().contains("there are still open locks"));
-    } finally {
-      IOUtils.closeWhileHandlingException(iw);
-    }
-  }
-
   public void testDiskFull() throws IOException {
     // test writeBytes
     MockDirectoryWrapper dir = newMockDirectory();
@@ -72,6 +72,16 @@ Upgrading from Solr 5.2
 * SolrJ's CollectionAdminRequest class is now marked as abstract. Use one of its concrete
   sub-classes instead.

+* Solr no longer supports forcefully unlocking an index.
+  This is no longer supported by the underlying Lucene locking
+  framework. The setting in solrconfig.xml has no effect anymore.
+  Background: If you use native lock factory, unlocking should
+  not be needed, because the locks are cleared after process
+  shutdown automatically by the operating system. If you are
+  using simple lock factory (not recommended) or hdfs lock
+  factory, you may need to manually unlock by deleting the lock
+  file from filesystem / HDFS.
+
 Detailed Change List
 ----------------------
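For the simple or hdfs lock factory, the manual cleanup described in the note above means deleting the stale lock file once you are sure no process still uses the index. A sketch for the plain-filesystem case; the path below is an example, not a Solr default:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class RemoveStaleLock {
      public static void main(String[] args) throws Exception {
        // Only do this after verifying no Solr/Lucene process holds the index.
        Path lockFile = Paths.get("/var/solr/data/core1/data/index/write.lock");
        if (Files.deleteIfExists(lockFile)) {
          System.out.println("removed stale lock: " + lockFile);
        } else {
          System.out.println("no lock file present: " + lockFile);
        }
      }
    }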
@@ -127,6 +137,9 @@ Other Changes

 * SOLR-7636: CLUSTERSTATUS API is executed at CollectionsHandler (noble)

+* LUCENE-6508: Remove ability to forcefully unlock an index.
+  This is no longer supported by the underlying Lucene locking
+  framework. (Uwe Schindler, Mike McCandless, Robert Muir)

 ================== 5.2.0 ==================
@@ -220,19 +220,6 @@
          -->
     <!-- <lockType>native</lockType> -->

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'none' or 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
@@ -236,19 +236,6 @@
          -->
     <lockType>${solr.lock.type:hdfs}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
@@ -238,19 +238,6 @@
          -->
     <lockType>${solr.lock.type:hdfs}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
@@ -220,19 +220,6 @@
          -->
     <!-- <lockType>native</lockType> -->

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'none' or 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
@@ -239,19 +239,6 @@
          -->
     <lockType>${solr.lock.type:hdfs}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
@@ -255,7 +255,6 @@ public class SolrConfig extends Config implements MapSerializable {
       conf = new CacheConfig(FastLRUCache.class, args, null);
     }
     fieldValueCacheConfig = conf;
-    unlockOnStartup = getBool(indexConfigPrefix + "/unlockOnStartup", false);
     useColdSearcher = getBool("query/useColdSearcher", false);
     dataDir = get("dataDir", null);
     if (dataDir != null && dataDir.length() == 0) dataDir = null;
@@ -485,7 +484,6 @@ public class SolrConfig extends Config implements MapSerializable {
   private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>();

   public final int maxWarmingSearchers;
-  public final boolean unlockOnStartup;
   public final boolean useColdSearcher;
   public final Version luceneMatchVersion;
   protected String dataDir;
@@ -506,7 +506,6 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
     synchronized (SolrCore.class) {
       firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
     }
-    boolean removeLocks = solrConfig.unlockOnStartup;

     initIndexReaderFactory();

@@ -516,20 +515,12 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
                            getSolrConfig().indexConfig.lockType);
       try {
         if (IndexWriter.isLocked(dir)) {
-          if (removeLocks) {
-            log.warn(
-                logid
-                    + "WARNING: Solr index directory '{}' is locked. Unlocking...",
-                indexDir);
-            dir.makeLock(IndexWriter.WRITE_LOCK_NAME).close();
-          } else {
-            log.error(logid
-                + "Solr index directory '{}' is locked. Throwing exception",
-                indexDir);
-            throw new LockObtainFailedException(
-                "Index locked for write for core " + name);
-          }
+          log.error(logid
+              + "Solr index directory '{}' is locked. Throwing exception.",
+              indexDir);
+          throw new LockObtainFailedException(
+              "Index locked for write for core '" + name +
+              "'. Solr no longer supports forceful unlocking via 'unlockOnStartup'. Please verify locks manually!");
         }
       } finally {
         directoryFactory.release(dir);
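With forceful unlocking gone, the only safe way to probe for a held write lock is to try to obtain it and release it immediately; the IndexWriter.isLocked() call in the hunk above works essentially this way against the new API. A sketch:

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    final class LockProbe {
      // Try to take the write lock; success means nobody held it, and
      // closing the Lock releases it again immediately.
      static boolean isIndexLocked(Directory dir) throws IOException {
        try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
          return false; // we obtained (and now release) it, so it was free
        } catch (LockObtainFailedException e) {
          return true;  // another process holds it
        }
      }
    }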
@@ -42,38 +42,17 @@ public class HdfsLockFactory extends LockFactory {
   private HdfsLockFactory() {}

   @Override
-  public Lock makeLock(Directory dir, String lockName) {
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
     if (!(dir instanceof HdfsDirectory)) {
       throw new UnsupportedOperationException("HdfsLockFactory can only be used with HdfsDirectory subclasses, got: " + dir);
     }
     final HdfsDirectory hdfsDir = (HdfsDirectory) dir;
-    return new HdfsLock(hdfsDir.getHdfsDirPath(), lockName, hdfsDir.getConfiguration());
-  }
-
-  static class HdfsLock extends Lock {
-
-    private final Path lockPath;
-    private final String lockName;
-    private final Configuration conf;
-    private boolean obtained;
-
-    public HdfsLock(Path lockPath, String lockName, Configuration conf) {
-      this.lockPath = lockPath;
-      this.lockName = lockName;
-      this.conf = conf;
-    }
-
-    @Override
-    public boolean obtain() throws IOException {
-      if (obtained) {
-        // Our instance is already locked:
-        throw new LockObtainFailedException("this lock instance was already obtained");
-      }
+    final Configuration conf = hdfsDir.getConfiguration();
+    final Path lockPath = hdfsDir.getHdfsDirPath();
+    final Path lockFile = new Path(lockPath, lockName);

     FSDataOutputStream file = null;
+    FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
-    try {
-      final FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
     while (true) {
       try {
         if (!fs.exists(lockPath)) {
@@ -86,10 +65,10 @@ public class HdfsLockFactory extends LockFactory {
           fs.mkdirs(lockPath);
         }

-        file = fs.create(new Path(lockPath, lockName), false);
+        file = fs.create(lockFile, false);
         break;
       } catch (FileAlreadyExistsException e) {
-        return obtained = false;
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
       } catch (RemoteException e) {
         if (e.getClassName().equals(
             "org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
@@ -101,46 +80,45 @@ public class HdfsLockFactory extends LockFactory {
           }
           continue;
         }
         log.error("Error creating lock file", e);
-        return obtained = false;
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
       } catch (IOException e) {
         log.error("Error creating lock file", e);
-        return obtained = false;
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
       } finally {
         IOUtils.closeQuietly(file);
       }
     }
-    } finally {
-      IOUtils.closeQuietly(fs);
-    }
-    return obtained = true;
+
+    return new HdfsLock(fs, lockFile);
   }

+  private static final class HdfsLock extends Lock {
+
+    private final FileSystem fs;
+    private final Path lockFile;
+    private volatile boolean closed;
+
+    HdfsLock(FileSystem fs, Path lockFile) {
+      this.fs = fs;
+      this.lockFile = lockFile;
+    }

     @Override
     public void close() throws IOException {
-      if (obtained) {
-        FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
-        try {
-          if (fs.exists(new Path(lockPath, lockName))
-              && !fs.delete(new Path(lockPath, lockName), false)) throw new LockReleaseFailedException(
-              "failed to delete " + new Path(lockPath, lockName));
-        } finally {
-          obtained = false;
-          IOUtils.closeQuietly(fs);
-        }
+      if (closed) {
+        return;
+      }
+      try {
+        if (fs.exists(lockFile) && !fs.delete(lockFile, false)) {
+          throw new LockReleaseFailedException("failed to delete: " + lockFile);
+        }
+      } finally {
+        IOUtils.closeQuietly(fs);
       }
     }

     @Override
-    public boolean isLocked() throws IOException {
-      boolean isLocked = false;
-      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
-      try {
-        isLocked = fs.exists(new Path(lockPath, lockName));
-      } finally {
-        IOUtils.closeQuietly(fs);
-      }
-      return isLocked;
+    public void ensureValid() throws IOException {
+      // no idea how to implement this on HDFS
     }
   }
 }
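The rewritten factory shows the new division of labor: obtainLock() does all the acquiring and failure handling up front, and the returned Lock only releases itself and reports validity. A toy local-filesystem analogue of HdfsLock, offered as a sketch only (the names are illustrative, and Lucene already ships real filesystem lock factories):

    import java.io.IOException;
    import java.nio.file.FileAlreadyExistsException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;
    import org.apache.lucene.store.LockReleaseFailedException;

    final class FileBasedLockSketch extends Lock {
      private final Path lockFile;
      private volatile boolean closed;

      private FileBasedLockSketch(Path lockFile) {
        this.lockFile = lockFile;
      }

      // All the "obtaining" happens here: create the lock file atomically,
      // or fail with LockObtainFailedException, mirroring the HDFS code above.
      static Lock obtain(Path indexDir, String lockName) throws IOException {
        Path lockFile = indexDir.resolve(lockName);
        try {
          Files.createFile(lockFile); // atomic: fails if the file already exists
        } catch (FileAlreadyExistsException e) {
          throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
        }
        return new FileBasedLockSketch(lockFile);
      }

      @Override
      public void close() throws IOException {
        if (closed) {
          return; // releasing twice is a no-op
        }
        try {
          if (Files.exists(lockFile) && !Files.deleteIfExists(lockFile)) {
            throw new LockReleaseFailedException("failed to delete: " + lockFile);
          }
        } finally {
          closed = true;
        }
      }

      @Override
      public void ensureValid() throws IOException {
        if (closed || !Files.exists(lockFile)) {
          throw new IOException("lock is no longer valid: " + lockFile);
        }
      }
    }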
@@ -23,12 +23,10 @@

   <indexConfig>
     <useCompoundFile>true</useCompoundFile>
-    <unlockOnStartup>false</unlockOnStartup>
   </indexConfig>
   <!-- BEGIN BAD: multiple indexConfig sections -->
   <indexConfig>
     <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
-    <unlockOnStartup>true</unlockOnStartup>
   </indexConfig>
   <!-- END BAD -->

@@ -18,20 +18,16 @@ package org.apache.solr.store.hdfs;
  */

 import java.io.IOException;
-import java.net.URI;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.util.IOUtils;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.cloud.hdfs.HdfsTestUtil;
 import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -55,58 +51,32 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
     dfsCluster = null;
   }

-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-
   @Test
   public void testBasic() throws IOException {
     String uri = HdfsTestUtil.getURI(dfsCluster);
     Path lockPath = new Path(uri, "/basedir/lock");
     Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
     HdfsDirectory dir = new HdfsDirectory(lockPath, conf);
-    Lock lock = dir.makeLock("testlock");
-    boolean success = lock.obtain();
-    assertTrue("We could not get the lock when it should be available", success);
-    Lock lock2 = dir.makeLock("testlock");
-    success = lock2.obtain();
-    assertFalse("We got the lock but it should be unavailble", success);
-    IOUtils.close(lock, lock2);
-    // now repeat after close()
-    lock = dir.makeLock("testlock");
-    success = lock.obtain();
-    assertTrue("We could not get the lock when it should be available", success);
-    lock2 = dir.makeLock("testlock");
-    success = lock2.obtain();
-    assertFalse("We got the lock but it should be unavailble", success);
-    IOUtils.close(lock, lock2);
+
+    try (Lock lock = dir.obtainLock("testlock")) {
+      assert lock != null;
+      try (Lock lock2 = dir.obtainLock("testlock")) {
+        assert lock2 != null;
+        fail("Locking should fail");
+      } catch (LockObtainFailedException lofe) {
+        // pass
+      }
+    }
+    // now repeat after close()
+    try (Lock lock = dir.obtainLock("testlock")) {
+      assert lock != null;
+      try (Lock lock2 = dir.obtainLock("testlock")) {
+        assert lock2 != null;
+        fail("Locking should fail");
+      } catch (LockObtainFailedException lofe) {
+        // pass
+      }
+    }
     dir.close();
   }
-
-  public void testDoubleObtain() throws Exception {
-    String uri = HdfsTestUtil.getURI(dfsCluster);
-    Path lockPath = new Path(uri, "/basedir/lock");
-    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
-    HdfsDirectory dir = new HdfsDirectory(lockPath, conf);
-    Lock lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    try {
-      lock.obtain();
-      fail("did not hit double-obtain failure");
-    } catch (LockObtainFailedException lofe) {
-      // expected
-    }
-    lock.close();
-
-    lock = dir.makeLock("foo");
-    assertTrue(lock.obtain());
-    lock.close();
-    dir.close();
-  }
 }
@@ -263,19 +263,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -266,19 +266,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -263,19 +263,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -263,19 +263,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -264,19 +264,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -244,19 +244,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -244,19 +244,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -246,19 +246,6 @@
          -->
     <lockType>${solr.lock.type:native}</lockType>

-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-      -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.