Merge branch 'master' into jira/lucene-5438-nrt-replication

Conflicts:
	lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
	lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
Mike McCandless 2016-02-07 14:03:13 -05:00
commit 9ba62e5e38
20 changed files with 364 additions and 150 deletions

View File

@@ -1,4 +1,4 @@
/**
/*
* Copyright (C) 1999-2010, International Business Machines
* Corporation and others. All Rights Reserved.
*

View File

@@ -696,49 +696,23 @@ final class IndexFileDeleter implements Closeable {
ensureOpen();
if (infoStream.isEnabled("IFD")) {
infoStream.message("IFD", "delete \"" + names + "\"");
infoStream.message("IFD", "delete " + names + "");
}
// We make two passes, first deleting any segments_N files, second deleting all the rest. We do this so that if we throw exc or JVM
// crashes during deletions, we don't leave the index in an "apparently corrupt" state:
// We make two passes, first deleting any segments_N files, second deleting the rest. We do this so that if we throw exc or JVM
// crashes during deletions, even when not on Windows, we don't leave the index in an "apparently corrupt" state:
for(String name : names) {
if (name.startsWith(IndexFileNames.SEGMENTS) == false) {
continue;
}
try {
directory.deleteFile(name);
} catch (NoSuchFileException | FileNotFoundException e) {
// IndexWriter should only ask us to delete files it knows it wrote, so if we hit this, something is wrong!
if (Constants.WINDOWS) {
// TODO: can we remove this OS-specific hacky logic? If windows deleteFile is buggy, we should instead contain this workaround in
// a WindowsFSDirectory ...
// LUCENE-6684: we suppress this assert for Windows, since a file could be in a confusing "pending delete" state, and falsely
// return NSFE/FNFE
} else {
throw e;
}
}
directory.deleteFile(name);
}
for(String name : names) {
if (name.startsWith(IndexFileNames.SEGMENTS) == true) {
continue;
}
try {
directory.deleteFile(name);
} catch (NoSuchFileException | FileNotFoundException e) {
// IndexWriter should only ask us to delete files it knows it wrote, so if we hit this, something is wrong!
if (Constants.WINDOWS) {
// TODO: can we remove this OS-specific hacky logic? If windows deleteFile is buggy, we should instead contain this workaround in
// a WindowsFSDirectory ...
// LUCENE-6684: we suppress this assert for Windows, since a file could be in a confusing "pending delete" state, and falsely
// return NSFE/FNFE
} else {
throw e;
}
}
directory.deleteFile(name);
}
}
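
The two passes above order the work so that every segments_N file is gone before any data file is touched; if an exception or JVM crash interrupts the loop, no commit point can be left referencing already-deleted files. A minimal sketch of the resulting control flow (names simplified for illustration):

    // Pass 1: segments_N files first, so a crash mid-delete never leaves a
    // commit point pointing at missing data files ("apparently corrupt").
    for (String name : names) {
      if (name.startsWith(IndexFileNames.SEGMENTS)) {
        directory.deleteFile(name);
      }
    }
    // Pass 2: all remaining files.
    for (String name : names) {
      if (name.startsWith(IndexFileNames.SEGMENTS) == false) {
        directory.deleteFile(name);
      }
    }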

View File

@@ -765,7 +765,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
*/
public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
if (d instanceof FSDirectory && ((FSDirectory) d).checkPendingDeletions()) {
throw new IllegalArgumentException("Directory " + d + " is still has pending deleted files; cannot initialize IndexWriter");
throw new IllegalArgumentException("Directory " + d + " still has pending deleted files; cannot initialize IndexWriter");
}
conf.setIndexWriter(this); // prevent reuse by other instances

View File

@@ -327,7 +327,7 @@ public abstract class FSDirectory extends BaseDirectory {
if (pendingDeletes.contains(name)) {
throw new NoSuchFileException("file \"" + name + "\" is already pending delete");
}
privateDeleteFile(name);
privateDeleteFile(name, false);
maybeDeletePendingFiles();
}
@@ -347,7 +347,7 @@
// Clone the set since we mutate it in privateDeleteFile:
for(String name : new HashSet<>(pendingDeletes)) {
privateDeleteFile(name);
privateDeleteFile(name, true);
}
}
}
@@ -363,14 +363,21 @@
}
}
private void privateDeleteFile(String name) throws IOException {
private void privateDeleteFile(String name, boolean isPendingDelete) throws IOException {
try {
Files.delete(directory.resolve(name));
pendingDeletes.remove(name);
} catch (NoSuchFileException | FileNotFoundException e) {
// We were asked to delete a non-existent file:
pendingDeletes.remove(name);
throw e;
if (isPendingDelete && Constants.WINDOWS) {
// TODO: can we remove this OS-specific hacky logic? If windows deleteFile is buggy, we should instead contain this workaround in
// a WindowsFSDirectory ...
// LUCENE-6684: we suppress this check for Windows, since a file could be in a confusing "pending delete" state, failing the first
// delete attempt with access denied and then apparently falsely failing here when we try to delete it again, with NSFE/FNFE
} else {
throw e;
}
} catch (IOException ioe) {
// On windows, a file delete can fail because there's still an open
// file handle against it. We record this in pendingDeletes and
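
Condensed, the new method reads as below; this is a sketch of the hunk above, and the final IOException handler (truncated in the diff) is assumed to record the name for a later retry:

    private void privateDeleteFile(String name, boolean isPendingDelete) throws IOException {
      try {
        Files.delete(directory.resolve(name));
        pendingDeletes.remove(name);
      } catch (NoSuchFileException | FileNotFoundException e) {
        pendingDeletes.remove(name);
        if (isPendingDelete && Constants.WINDOWS) {
          // LUCENE-6684: a pending delete on Windows can fail once with access
          // denied, then falsely report NSFE/FNFE on the retry; swallow it.
        } else {
          throw e;
        }
      } catch (IOException ioe) {
        // Assumed from the truncated hunk: likely an open file handle
        // (Windows), so remember the file and retry the delete later.
        pendingDeletes.add(name);
      }
    }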

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
@@ -118,7 +119,9 @@ public class FileSwitchDirectory extends Directory {
if (exc != null && files.isEmpty()) {
throw exc;
}
return files.toArray(new String[files.size()]);
String[] result = files.toArray(new String[files.size()]);
Arrays.sort(result);
return result;
}
/** Utility method to return a file's extension. */
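
FileSwitchDirectory here, and NRTCachingDirectory below, now sort before returning, making listAll() output deterministic across Directory implementations. One payoff of a sorted contract, as a sketch (containsFile is a hypothetical helper, not part of the patch):

    // Sorted listAll() lets callers binary-search instead of building a Set:
    static boolean containsFile(Directory dir, String name) throws IOException {
      return Arrays.binarySearch(dir.listAll(), name) >= 0;
    }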

View File

@@ -106,7 +106,9 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable
"cache=" + Arrays.toString(cache.listAll()) + ",delegate=" + Arrays.toString(in.listAll()));
}
}
return files.toArray(new String[files.size()]);
String[] result = files.toArray(new String[files.size()]);
Arrays.sort(result);
return result;
}
@Override

View File

@@ -17,6 +17,10 @@
package org.apache.lucene;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -24,16 +28,16 @@ import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeScheduler;
import org.apache.lucene.index.MergeTrigger;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import org.apache.lucene.util.PrintStreamInfoStream;
/**
* Holds test cases to verify external APIs are accessible
@@ -93,11 +97,16 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
Document doc = new Document();
Field idField = newStringField("id", "", Field.Store.YES);
doc.add(idField);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
.setMergeScheduler(new MyMergeScheduler())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergePolicy(newLogMergePolicy()));
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
.setMergeScheduler(new MyMergeScheduler())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergePolicy(newLogMergePolicy());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
iwc.setInfoStream(new PrintStreamInfoStream(new PrintStream(baos, true, IOUtils.UTF_8)));
IndexWriter writer = new IndexWriter(dir, iwc);
LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
logMP.setMergeFactor(10);
for(int i=0;i<20;i++) {
@@ -110,10 +119,16 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
// OK
}
writer.rollback();
assertTrue(mergeThreadCreated);
assertTrue(mergeCalled);
assertTrue(excCalled);
try {
assertTrue(mergeThreadCreated);
assertTrue(mergeCalled);
assertTrue(excCalled);
} catch (AssertionError ae) {
System.out.println("TEST FAILED; IW infoStream output:");
System.out.println(baos.toString(IOUtils.UTF_8));
throw ae;
}
dir.close();
}
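
The reworked test buffers IndexWriter's infoStream and prints it only when an assertion trips, so passing runs stay quiet. The pattern in isolation (a sketch; it assumes the enclosing test method declares throws Exception, since the UTF-8 PrintStream and toString conversions throw checked exceptions):

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    iwc.setInfoStream(new PrintStreamInfoStream(new PrintStream(baos, true, IOUtils.UTF_8)));
    // ... exercise the writer ...
    try {
      assertTrue(mergeCalled);
    } catch (AssertionError ae) {
      // Pay the cost of dumping the verbose log only on failure:
      System.out.println("TEST FAILED; IW infoStream output:");
      System.out.println(baos.toString(IOUtils.UTF_8));
      throw ae;
    }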

View File

@@ -488,4 +488,80 @@ public class TestIndexFileDeleter extends LuceneTestCase {
w.close();
dir.close();
}
// LUCENE-6835: make sure best-effort to not create an "apparently but not really" corrupt index is working:
public void testExcInDeleteFile() throws Throwable {
int iters = atLeast(10);
for(int iter=0;iter<iters;iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
MockDirectoryWrapper dir = newMockDirectory();
final AtomicBoolean doFailExc = new AtomicBoolean();
dir.failOn(new MockDirectoryWrapper.Failure() {
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (doFailExc.get() && random().nextInt(4) == 1) {
Exception e = new Exception();
StackTraceElement stack[] = e.getStackTrace();
for (int i = 0; i < stack.length; i++) {
if (stack[i].getClassName().equals(MockDirectoryWrapper.class.getName()) && stack[i].getMethodName().equals("deleteFile")) {
throw new MockDirectoryWrapper.FakeIOException();
}
}
}
}
});
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setMergeScheduler(new SerialMergeScheduler());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
w.addDocument(new Document());
// makes segments_1
if (VERBOSE) {
System.out.println("TEST: now commit");
}
w.commit();
w.addDocument(new Document());
doFailExc.set(true);
if (VERBOSE) {
System.out.println("TEST: now close");
}
try {
w.close();
if (VERBOSE) {
System.out.println("TEST: no exception (ok)");
}
} catch (RuntimeException re) {
assertTrue(re.getCause() instanceof MockDirectoryWrapper.FakeIOException);
// good
if (VERBOSE) {
System.out.println("TEST: got expected exception:");
re.printStackTrace(System.out);
}
} catch (MockDirectoryWrapper.FakeIOException fioe) {
// good
if (VERBOSE) {
System.out.println("TEST: got expected exception:");
fioe.printStackTrace(System.out);
}
}
doFailExc.set(false);
assertFalse(w.w.isOpen());
for(String name : dir.listAll()) {
if (name.startsWith(IndexFileNames.SEGMENTS)) {
if (VERBOSE) {
System.out.println("TEST: now read " + name);
}
SegmentInfos.readCommit(dir, name);
}
}
dir.close();
}
}
}
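
The Failure installed above inspects a throwaway exception's stack trace so the fault fires only while MockDirectoryWrapper.deleteFile is on the call stack, making the injection surgical rather than random across all directory operations. The idiom, extracted as a sketch:

    // Throw only when a specific method appears on the current stack:
    for (StackTraceElement frame : new Exception().getStackTrace()) {
      if (frame.getClassName().equals(MockDirectoryWrapper.class.getName())
          && frame.getMethodName().equals("deleteFile")) {
        throw new MockDirectoryWrapper.FakeIOException();
      }
    }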

View File

@@ -78,6 +78,7 @@ import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.NoLockFactory;
@@ -1268,8 +1269,14 @@ public class TestIndexWriter extends LuceneTestCase {
FileSystem fs = new WindowsFS(path.getFileSystem()).getFileSystem(URI.create("file:///"));
Path indexPath = new FilterPath(path, fs);
// NOTE: cannot use MMapDir, because WindowsFS doesn't see/think it keeps file handles open?
FSDirectory dir = new NIOFSDirectory(indexPath);
// NOTE: on Unix, we cannot use MMapDir, because WindowsFS doesn't see/think it keeps file handles open. Yet, on Windows, we MUST use
// MMapDir because the windows OS will in fact prevent file deletion for us, and fails otherwise:
FSDirectory dir;
if (Constants.WINDOWS) {
dir = new MMapDirectory(indexPath);
} else {
dir = new NIOFSDirectory(indexPath);
}
MergePolicy mergePolicy = newLogMergePolicy(true);

View File

@@ -1,4 +1,4 @@
/**
/*
* Copyright (c) 2005 Bruno Martins
* All rights reserved.
*

View File

@@ -1295,4 +1295,30 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
assertTrue(Arrays.asList(fsDir.listAll()).contains(fileName));
}
}
public void testListAllIsSorted() throws IOException {
try (Directory dir = getDirectory(createTempDir())) {
int count = atLeast(20);
Set<String> names = new HashSet<>();
while(names.size() < count) {
String name = TestUtil.randomSimpleString(random());
if (name.length() == 0) {
continue;
}
if (random().nextInt(5) == 1) {
IndexOutput out = dir.createTempOutput(name, "foo", IOContext.DEFAULT);
names.add(out.getName());
out.close();
} else if (names.contains(name) == false) {
IndexOutput out = dir.createOutput(name, IOContext.DEFAULT);
names.add(out.getName());
out.close();
}
}
String[] actual = dir.listAll();
String[] expected = actual.clone();
Arrays.sort(expected);
assertArrayEquals(expected, actual);
}
}
}

View File

@@ -210,6 +210,7 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase {
@Override
public void run() {
IndexWriter writer = null;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
for(int i=0;i<this.numIteration;i++) {
if (VERBOSE) {
System.out.println("TEST: WriterThread iter=" + i);
@@ -218,13 +219,17 @@
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
// We only print the IW infoStream output on exc, below:
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream printStream;
try {
iwc.setInfoStream(new PrintStreamInfoStream(new PrintStream(baos, true, "UTF8")));
printStream = new PrintStream(baos, true, "UTF8");
} catch (UnsupportedEncodingException uee) {
// shouldn't happen
throw new RuntimeException(uee);
}
iwc.setInfoStream(new PrintStreamInfoStream(printStream));
printStream.println("\nTEST: WriterThread iter=" + i);
iwc.setOpenMode(OpenMode.APPEND);
try {
writer = new IndexWriter(dir, iwc);

View File

@@ -833,12 +833,12 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
}
throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks, cause);
}
randomIOExceptionRate = 0.0;
randomIOExceptionRateOnOpen = 0.0;
if (getCheckIndexOnClose()) {
randomIOExceptionRate = 0.0;
randomIOExceptionRateOnOpen = 0.0;
if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) && DirectoryReader.indexExists(this)) {
if (getCheckIndexOnClose()) {
if (DirectoryReader.indexExists(this)) {
if (LuceneTestCase.VERBOSE) {
System.out.println("\nNOTE: MockDirectoryWrapper: now crush");
}
@@ -848,69 +848,66 @@
}
TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose(), true);
}
// TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles
// nocommit pull this outside of "getCheckIndexOnClose"
if (assertNoUnreferencedFilesOnClose) {
// TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles
if (assertNoUnreferencedFilesOnClose) {
// now look for unreferenced files: discount ones that we tried to delete but could not
Set<String> allFiles = new HashSet<>(Arrays.asList(listAll()));
String[] startFiles = allFiles.toArray(new String[0]);
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
// now look for unreferenced files: discount ones that we tried to delete but could not
Set<String> allFiles = new HashSet<>(Arrays.asList(listAll()));
String[] startFiles = allFiles.toArray(new String[0]);
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
// We must do this before opening writer otherwise writer will be angry if there are pending deletions:
TestUtil.disableVirusChecker(in);
// We must do this before opening writer otherwise writer will be angry if there are pending deletions:
TestUtil.disableVirusChecker(in);
new IndexWriter(in, iwc).rollback();
Set<String> files = new HashSet<>(Arrays.asList(listAll()));
String[] endFiles = files.toArray(new String[0]);
new IndexWriter(in, iwc).rollback();
String[] endFiles = in.listAll();
Set<String> startSet = new TreeSet<>(Arrays.asList(startFiles));
Set<String> endSet = new TreeSet<>(Arrays.asList(endFiles));
Set<String> startSet = new TreeSet<>(Arrays.asList(startFiles));
Set<String> endSet = new TreeSet<>(Arrays.asList(endFiles));
startFiles = startSet.toArray(new String[0]);
endFiles = endSet.toArray(new String[0]);
startFiles = startSet.toArray(new String[0]);
endFiles = endSet.toArray(new String[0]);
if (!Arrays.equals(startFiles, endFiles)) {
List<String> removed = new ArrayList<>();
for(String fileName : startFiles) {
if (!endSet.contains(fileName)) {
removed.add(fileName);
}
if (!Arrays.equals(startFiles, endFiles)) {
List<String> removed = new ArrayList<>();
for(String fileName : startFiles) {
if (!endSet.contains(fileName)) {
removed.add(fileName);
}
List<String> added = new ArrayList<>();
for(String fileName : endFiles) {
if (!startSet.contains(fileName)) {
added.add(fileName);
}
}
String extras;
if (removed.size() != 0) {
extras = "\n\nThese files were removed: " + removed;
} else {
extras = "";
}
if (added.size() != 0) {
extras += "\n\nThese files were added (waaaaaaaaaat!): " + added;
}
throw new RuntimeException(this + ": unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras);
}
DirectoryReader ir1 = DirectoryReader.open(this);
int numDocs1 = ir1.numDocs();
ir1.close();
new IndexWriter(this, new IndexWriterConfig(null)).close();
DirectoryReader ir2 = DirectoryReader.open(this);
int numDocs2 = ir2.numDocs();
ir2.close();
assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2;
List<String> added = new ArrayList<>();
for(String fileName : endFiles) {
if (!startSet.contains(fileName)) {
added.add(fileName);
}
}
String extras;
if (removed.size() != 0) {
extras = "\n\nThese files were removed: " + removed;
} else {
extras = "";
}
if (added.size() != 0) {
extras += "\n\nThese files were added (waaaaaaaaaat!): " + added;
}
throw new RuntimeException("unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras);
}
DirectoryReader ir1 = DirectoryReader.open(this);
int numDocs1 = ir1.numDocs();
ir1.close();
new IndexWriter(this, new IndexWriterConfig(null)).close();
DirectoryReader ir2 = DirectoryReader.open(this);
int numDocs2 = ir2.numDocs();
ir2.close();
assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2;
}
}
success = true;
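
The unreferenced-files check above boils down to a set difference between the directory listing taken before and after a rollback-only IndexWriter pass; anything that "appeared" is a leftover the writer no longer references. In isolation, reusing the names from the hunk (a sketch):

    Set<String> startSet = new TreeSet<>(Arrays.asList(startFiles));
    Set<String> endSet = new TreeSet<>(Arrays.asList(endFiles));

    List<String> removed = new ArrayList<>(); // present before, gone after
    for (String fileName : startSet) {
      if (!endSet.contains(fileName)) {
        removed.add(fileName);
      }
    }
    List<String> added = new ArrayList<>();   // appeared after: unreferenced leftovers
    for (String fileName : endSet) {
      if (!startSet.contains(fileName)) {
        added.add(fileName);
      }
    }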

View File

@@ -144,6 +144,8 @@ New Features
* SOLR-8285: Ensure the /export handler works with NULL field values (Joel Bernstein)
* SOLR-8502: Improve Solr JDBC Driver to support SQL Clients like DBVisualizer (Kevin Risden, Joel Bernstein)
Bug Fixes
----------------------
* SOLR-8386: Add field option in the new admin UI schema page loads up even when no schemaFactory has been
@@ -347,6 +349,10 @@ New Features
* SOLR-8586: added index fingerprint, a hash over all versions currently in the index.
PeerSync now uses this to check if replicas are in sync. (yonik)
* SOLR-8500: Allow the number of threads ConcurrentUpdateSolrClient uses in StreamingSolrClients to be configured via a
system property. NOTE: this is an expert option and can result in needing to do full index replication for recovery more
often; the sweet spot for using this is very high volume, leader-only indexing. (Tim Potter, Erick Erickson)
Bug Fixes
----------------------

View File

@@ -41,6 +41,8 @@ import java.util.concurrent.ExecutorService;
public class StreamingSolrClients {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final int runnerCount = Integer.getInteger("solr.cloud.replication.runners", 1);
private HttpClient httpClient;
@@ -70,7 +72,7 @@
// NOTE: increasing to more than 1 threadCount for the client could cause updates to be reordered
// on a greater scale since the current behavior is to only increase the number of connections/Runners when
// the queue is more than half full.
client = new ConcurrentUpdateSolrClient(url, httpClient, 100, 1, updateExecutor, true) {
client = new ConcurrentUpdateSolrClient(url, httpClient, 100, runnerCount, updateExecutor, true) {
@Override
public void handleError(Throwable ex) {
req.trackRequestResult(null, false);
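
runnerCount is read once via Integer.getInteger and defaults to 1 when the property is unset, so existing deployments keep the old single-runner behavior. For example (the -D flag below is an assumption about how the JVM is launched, not part of the patch):

    // Defaults to 1 when -Dsolr.cloud.replication.runners is not set:
    private final int runnerCount = Integer.getInteger("solr.cloud.replication.runners", 1);

    // e.g. start the JVM with -Dsolr.cloud.replication.runners=4 to allow up
    // to 4 concurrent runners per client (expert option; see CHANGES above).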

View File

@@ -37,10 +37,10 @@ public class SolrClientCache implements Serializable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private Map<String, SolrClient> solrClients = new HashMap();
private final Map<String, SolrClient> solrClients = new HashMap<>();
public synchronized CloudSolrClient getCloudSolrClient(String zkHost) {
CloudSolrClient client = null;
CloudSolrClient client;
if (solrClients.containsKey(zkHost)) {
client = (CloudSolrClient) solrClients.get(zkHost);
} else {
@@ -53,7 +53,7 @@
}
public synchronized HttpSolrClient getHttpSolrClient(String host) {
HttpSolrClient client = null;
HttpSolrClient client;
if (solrClients.containsKey(host)) {
client = (HttpSolrClient) solrClients.get(host);
} else {
@@ -64,12 +64,11 @@
}
public void close() {
Iterator<SolrClient> it = solrClients.values().iterator();
while(it.hasNext()) {
for(Map.Entry<String, SolrClient> entry : solrClients.entrySet()) {
try {
it.next().close();
entry.getValue().close();
} catch (IOException e) {
log.error(e.getMessage(), e);
log.error("Error closing SolrClient for " + entry.getKey(), e);
}
}
}

View File

@@ -45,17 +45,17 @@ class ConnectionImpl implements Connection {
private final String url;
private final SolrClientCache solrClientCache = new SolrClientCache();
private final CloudSolrClient client;
private final String collection;
private final Properties properties;
private final DatabaseMetaData databaseMetaData;
private final Statement connectionStatement;
private String collection;
private boolean closed;
private SQLWarning currentWarning;
ConnectionImpl(String url, String zkHost, String collection, Properties properties) throws SQLException {
this.url = url;
this.client = solrClientCache.getCloudSolrClient(zkHost);
this.collection = collection;
this.client = this.solrClientCache.getCloudSolrClient(zkHost);
this.setSchema(collection);
this.properties = properties;
this.connectionStatement = createStatement();
this.databaseMetaData = new DatabaseMetaDataImpl(this, this.connectionStatement);
@@ -158,7 +158,7 @@
@Override
public void setCatalog(String catalog) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
@@ -301,7 +301,7 @@
@Override
public boolean isValid(int timeout) throws SQLException {
// check that the connection isn't close and able to connect within the timeout
// check that the connection isn't closed and able to connect within the timeout
try {
if(!isClosed()) {
this.client.connect(timeout, TimeUnit.SECONDS);
@@ -345,7 +345,7 @@
@Override
public void setSchema(String schema) throws SQLException {
throw new UnsupportedOperationException();
this.collection = schema;
}
@Override
@@ -377,4 +377,4 @@
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new UnsupportedOperationException();
}
}
}
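
With setSchema now mapped to the active collection instead of throwing, a client can retarget an open connection. A usage sketch against the Solr JDBC driver; the zkHost, port, and collection names are placeholders:

    Properties props = new Properties();
    try (Connection con = DriverManager.getConnection(
             "jdbc:solr://localhost:9983?collection=collection1", props)) {
      con.setSchema("collection2"); // switches the active collection
      try (Statement stmt = con.createStatement();
           ResultSet rs = stmt.executeQuery("select id from collection2 order by id asc limit 10")) {
        while (rs.next()) {
          System.out.println(rs.getString("id"));
        }
      }
    }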

View File

@@ -16,12 +16,22 @@
*/
package org.apache.solr.client.solrj.io.sql;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Set;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.util.SimpleOrderedMap;
class DatabaseMetaDataImpl implements DatabaseMetaData {
private final ConnectionImpl connection;
@@ -32,6 +42,21 @@ class DatabaseMetaDataImpl implements DatabaseMetaData {
this.connectionStatement = connectionStatement;
}
private int getVersionPart(String version, int part) {
// TODO Is there a better way to do this? Reuse code from elsewhere?
// Gets the given part of the Solr version. If parsing fails then just return 0.
if (version != null) {
try {
String[] versionParts = version.split("\\.", 3);
return Integer.parseInt(versionParts[part]);
} catch (Throwable e) {
return 0;
}
} else {
return 0;
}
}
@Override
public boolean allProceduresAreCallable() throws SQLException {
return false;
@@ -79,32 +104,78 @@
@Override
public String getDatabaseProductName() throws SQLException {
return null;
return "Apache Solr";
}
@Override
public String getDatabaseProductVersion() throws SQLException {
return null;
// Returns the version for the first live node in the Solr cluster.
SolrQuery sysQuery = new SolrQuery();
sysQuery.setRequestHandler("/admin/info/system");
CloudSolrClient cloudSolrClient = this.connection.getClient();
Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
SolrClient solrClient = null;
for (String node : liveNodes) {
try {
String nodeURL = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(node);
solrClient = new HttpSolrClient(nodeURL);
QueryResponse rsp = solrClient.query(sysQuery);
return String.valueOf(((SimpleOrderedMap) rsp.getResponse().get("lucene")).get("solr-spec-version"));
} catch (SolrServerException | IOException ignore) {
return "";
} finally {
if (solrClient != null) {
try {
solrClient.close();
} catch (IOException ignore) {
// Don't worry about failing to close the Solr client
}
}
}
}
// If no version found just return empty string
return "";
}
@Override
public int getDatabaseMajorVersion() throws SQLException {
return getVersionPart(this.getDatabaseProductVersion(), 0);
}
@Override
public int getDatabaseMinorVersion() throws SQLException {
return getVersionPart(this.getDatabaseProductVersion(), 1);
}
@Override
public String getDriverName() throws SQLException {
return null;
return this.getClass().getPackage().getSpecificationTitle();
}
@Override
public String getDriverVersion() throws SQLException {
return null;
return this.getClass().getPackage().getSpecificationVersion();
}
@Override
public int getDriverMajorVersion() {
return 0;
try {
return getVersionPart(this.getDriverVersion(), 0);
} catch (SQLException e) {
return 0;
}
}
@Override
public int getDriverMinorVersion() {
return 0;
try {
return getVersionPart(this.getDriverVersion(), 1);
} catch (SQLException e) {
return 0;
}
}
@Override
@@ -822,19 +893,9 @@
return 0;
}
@Override
public int getDatabaseMajorVersion() throws SQLException {
return 0;
}
@Override
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
@Override
public int getJDBCMajorVersion() throws SQLException {
return 0;
return 4;
}
@Override
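
getVersionPart splits the version string on dots and parses the requested index, falling back to 0 on any failure, so for example:

    assert getVersionPart("6.0.0", 0) == 6;   // major
    assert getVersionPart("6.0.0", 1) == 0;   // minor
    assert getVersionPart(null, 0) == 0;      // no version string available
    assert getVersionPart("nightly", 0) == 0; // NumberFormatException is swallowed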

View File

@@ -26,6 +26,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.HashMap;
import java.util.Random;
@@ -45,6 +46,7 @@ class StatementImpl implements Statement {
private String currentSQL;
private ResultSetImpl currentResultSet;
private SQLWarning currentWarning;
private int maxRows;
StatementImpl(ConnectionImpl connection) {
this.connection = connection;
@@ -58,6 +60,10 @@
this.currentResultSet = null;
}
if(maxRows > 0 && !containsLimit(sql)) {
sql = sql + " limit "+Integer.toString(maxRows);
}
closed = false; // If closed reopen so Statement can be reused.
this.currentResultSet = new ResultSetImpl(this, constructStream(sql));
return this.currentResultSet;
@@ -132,12 +138,12 @@
@Override
public int getMaxRows() throws SQLException {
throw new UnsupportedOperationException();
return this.maxRows;
}
@Override
public void setMaxRows(int max) throws SQLException {
throw new UnsupportedOperationException();
this.maxRows = max;
}
@Override
@@ -351,4 +357,10 @@
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new UnsupportedOperationException();
}
private boolean containsLimit(String sql) {
String[] tokens = sql.split("\\s+");
String secondToLastToken = tokens[tokens.length-2];
return ("limit").equals(secondToLastToken.toLowerCase(Locale.getDefault()));
}
}
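
Because executeQuery appends a limit only when containsLimit finds none, setMaxRows acts as a default row cap that an explicit LIMIT in the SQL overrides; the JdbcTest changes below exercise exactly this. A sketch:

    stmt.setMaxRows(2);
    // No LIMIT in the SQL: " limit 2" is appended, so at most 2 rows return.
    ResultSet rs1 = stmt.executeQuery("select id, a_i from collection1 order by a_i asc");
    // An explicit LIMIT wins over maxRows (up to 100 rows here):
    ResultSet rs2 = stmt.executeQuery("select id, a_i from collection1 order by a_i asc limit 100");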

View File

@@ -166,7 +166,8 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {
stmt.close();
//Test statement reuse
rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc limit 2");
stmt.setMaxRows(2);
rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc");
assert(rs.next());
assert(rs.getLong("a_i") == 0);
assert(rs.getLong(2) == 0);
@@ -176,8 +177,8 @@
assert(!rs.next());
stmt.close();
//Test simple loop
rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc limit 100");
//Test simple loop. Since limit is set it will override the statement maxRows.
rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc LIMIT 100");
int count = 0;
while(rs.next()) {
++count;
@@ -381,7 +382,13 @@
private void testJDBCMethods(String collection, String connectionString, Properties properties, String sql) throws Exception {
try (Connection con = DriverManager.getConnection(connectionString, properties)) {
assertTrue(con.isValid(DEFAULT_CONNECTION_TIMEOUT));
assertEquals(zkServer.getZkAddress(), con.getCatalog());
con.setCatalog(zkServer.getZkAddress());
assertEquals(zkServer.getZkAddress(), con.getCatalog());
assertEquals(collection, con.getSchema());
con.setSchema(collection);
assertEquals(collection, con.getSchema());
DatabaseMetaData databaseMetaData = con.getMetaData();
@@ -390,6 +397,21 @@
assertEquals(con, databaseMetaData.getConnection());
assertEquals(connectionString, databaseMetaData.getURL());
assertEquals(4, databaseMetaData.getJDBCMajorVersion());
assertEquals(0, databaseMetaData.getJDBCMinorVersion());
assertEquals("Apache Solr", databaseMetaData.getDatabaseProductName());
// The following tests require package information that is not available when running via Maven
// assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDatabaseProductVersion());
// assertEquals(0, databaseMetaData.getDatabaseMajorVersion());
// assertEquals(0, databaseMetaData.getDatabaseMinorVersion());
// assertEquals(this.getClass().getPackage().getSpecificationTitle(), databaseMetaData.getDriverName());
// assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDriverVersion());
// assertEquals(0, databaseMetaData.getDriverMajorVersion());
// assertEquals(0, databaseMetaData.getDriverMinorVersion());
try(ResultSet rs = databaseMetaData.getCatalogs()) {
assertTrue(rs.next());
assertEquals(zkServer.getZkAddress(), rs.getString("TABLE_CAT"));