From 3c15c3f03df2d769174964e59a760264dce918d8 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sat, 6 Feb 2016 09:41:46 -0500 Subject: [PATCH 01/17] LUCENE-6835: add test case confirming listAll is sorted; fix dir impls that weren't --- .../lucene/store/FileSwitchDirectory.java | 5 +++- .../lucene/store/NRTCachingDirectory.java | 4 ++- .../lucene/store/BaseDirectoryTestCase.java | 26 +++++++++++++++++++ 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java index eb3fa0f6f24..db9f08542d9 100644 --- a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; @@ -118,7 +119,9 @@ public class FileSwitchDirectory extends Directory { if (exc != null && files.isEmpty()) { throw exc; } - return files.toArray(new String[files.size()]); + String[] result = files.toArray(new String[files.size()]); + Arrays.sort(result); + return result; } /** Utility method to return a file's extension. 
*/ diff --git a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java index 4f3b25422cc..ef7b895af76 100644 --- a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java @@ -106,7 +106,9 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable "cache=" + Arrays.toString(cache.listAll()) + ",delegate=" + Arrays.toString(in.listAll())); } } - return files.toArray(new String[files.size()]); + String[] result = files.toArray(new String[files.size()]); + Arrays.sort(result); + return result; } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java index c2e199698dd..3d9c20dd46f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java @@ -1295,4 +1295,30 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { assertTrue(Arrays.asList(fsDir.listAll()).contains(fileName)); } } + + public void testListAllIsSorted() throws IOException { + try (Directory dir = getDirectory(createTempDir())) { + int count = atLeast(20); + Set names = new HashSet<>(); + while(names.size() < count) { + String name = TestUtil.randomSimpleString(random()); + if (name.length() == 0) { + continue; + } + if (random().nextInt(5) == 1) { + IndexOutput out = dir.createTempOutput(name, "foo", IOContext.DEFAULT); + names.add(out.getName()); + out.close(); + } else if (names.contains(name) == false) { + IndexOutput out = dir.createOutput(name, IOContext.DEFAULT); + names.add(out.getName()); + out.close(); + } + } + String[] actual = dir.listAll(); + String[] expected = actual.clone(); + Arrays.sort(expected); + assertEquals(expected, 
actual); + } + } } From 0404be94a7e62dbb6e2d25ee8a3a10b975dd470e Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sat, 6 Feb 2016 11:08:07 -0500 Subject: [PATCH 02/17] improve exception message --- lucene/core/src/java/org/apache/lucene/index/IndexWriter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java index a10d2e1f752..8517fb1f424 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java @@ -755,7 +755,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable { */ public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException { if (d instanceof FSDirectory && ((FSDirectory) d).checkPendingDeletions()) { - throw new IllegalArgumentException("Directory " + d + " is still has pending deleted files; cannot initialize IndexWriter"); + throw new IllegalArgumentException("Directory " + d + " still has pending deleted files; cannot initialize IndexWriter"); } conf.setIndexWriter(this); // prevent reuse by other instances From 3e7fe7867f64b254680d462092d01f07858aa7c3 Mon Sep 17 00:00:00 2001 From: Erick Erickson Date: Sat, 6 Feb 2016 08:34:04 -0800 Subject: [PATCH 03/17] SOLR-8500: Allow the number of threads ConcurrentUpdateSolrClient StreamingSolrClients configurable by a system property --- solr/CHANGES.txt | 4 ++++ .../src/java/org/apache/solr/update/StreamingSolrClients.java | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 348cf017373..e93c45aebdd 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -144,6 +144,10 @@ New Features * SOLR-8285: Ensure the /export handler works with NULL field values (Joel Bernstein) +* SOLR-8500: Allow the number of threads ConcurrentUpdateSolrClient StreamingSolrClients configurable by a + system 
property. NOTE: this is an expert option and can result in more often needing to do full index replication + for recovery, the sweet spot for using this is very high volume, leader-only indexing. (Tim Potter, Erick Erickson) + Bug Fixes ---------------------- * SOLR-8386: Add field option in the new admin UI schema page loads up even when no schemaFactory has been diff --git a/solr/core/src/java/org/apache/solr/update/StreamingSolrClients.java b/solr/core/src/java/org/apache/solr/update/StreamingSolrClients.java index 0ca814999da..ecc2a2bb29e 100644 --- a/solr/core/src/java/org/apache/solr/update/StreamingSolrClients.java +++ b/solr/core/src/java/org/apache/solr/update/StreamingSolrClients.java @@ -41,6 +41,8 @@ import java.util.concurrent.ExecutorService; public class StreamingSolrClients { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private final int runnerCount = Integer.getInteger("solr.cloud.replication.runners", 1); private HttpClient httpClient; @@ -70,7 +72,7 @@ public class StreamingSolrClients { // NOTE: increasing to more than 1 threadCount for the client could cause updates to be reordered // on a greater scale since the current behavior is to only increase the number of connections/Runners when // the queue is more than half full. - client = new ConcurrentUpdateSolrClient(url, httpClient, 100, 1, updateExecutor, true) { + client = new ConcurrentUpdateSolrClient(url, httpClient, 100, runnerCount, updateExecutor, true) { @Override public void handleError(Throwable ex) { req.trackRequestResult(null, false); From 112a2311df50142ec19ec0033133fbc10df223c9 Mon Sep 17 00:00:00 2001 From: Erick Erickson Date: Sat, 6 Feb 2016 08:46:51 -0800 Subject: [PATCH 04/17] Put CHANGES entry for SOLR-8500 in the wrong section. 
--- solr/CHANGES.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index e93c45aebdd..06e24d04045 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -144,10 +144,6 @@ New Features * SOLR-8285: Ensure the /export handler works with NULL field values (Joel Bernstein) -* SOLR-8500: Allow the number of threads ConcurrentUpdateSolrClient StreamingSolrClients configurable by a - system property. NOTE: this is an expert option and can result in more often needing to do full index replication - for recovery, the sweet spot for using this is very high volume, leader-only indexing. (Tim Potter, Erick Erickson) - Bug Fixes ---------------------- * SOLR-8386: Add field option in the new admin UI schema page loads up even when no schemaFactory has been @@ -351,6 +347,10 @@ New Features * SOLR-8586: added index fingerprint, a hash over all versions currently in the index. PeerSync now uses this to check if replicas are in sync. (yonik) +* SOLR-8500: Allow the number of threads ConcurrentUpdateSolrClient StreamingSolrClients configurable by a + system property. NOTE: this is an expert option and can result in more often needing to do full index replication + for recovery, the sweet spot for using this is very high volume, leader-only indexing. 
(Tim Potter, Erick Erickson) + Bug Fixes ---------------------- From afe792b57a1d1924ba73c1c9b20dfadea6ad55f7 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Sun, 7 Feb 2016 00:08:49 +0100 Subject: [PATCH 05/17] Fix ICU-License header to not mimic Javadocs (reduces warning) --- .../apache/lucene/analysis/icu/segmentation/ScriptIterator.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java index d17889b10f8..4dd723e287a 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java @@ -1,4 +1,4 @@ -/** +/* * Copyright (C) 1999-2010, International Business Machines * Corporation and others. All Rights Reserved. * From 8231bf9cd46638e26ef33af9fd2b9e9532a9ca5c Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Sun, 7 Feb 2016 00:12:24 +0100 Subject: [PATCH 06/17] Fix Jaspell license header to not mimic Javadocs (reduces warning) --- .../lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java index 29a99aa2cce..4dc377de348 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2005 Bruno Martins * All rights reserved. 
* From dc6b1a68d28142a1ef85f497b6437cbd7f2777f0 Mon Sep 17 00:00:00 2001 From: jbernste Date: Sat, 6 Feb 2016 20:18:38 -0500 Subject: [PATCH 07/17] SOLR-8507, SOLR-8638: Add information about database product name, product version, driver name, and driver version. Implement ConnectionImpl setCatalog and setSchema. --- .../solr/client/solrj/io/SolrClientCache.java | 13 ++- .../client/solrj/io/sql/ConnectionImpl.java | 14 +-- .../solrj/io/sql/DatabaseMetaDataImpl.java | 95 +++++++++++++++---- .../solr/client/solrj/io/sql/JdbcTest.java | 21 ++++ 4 files changed, 112 insertions(+), 31 deletions(-) diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java index 7e3abd1f84a..e544c104252 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java @@ -37,10 +37,10 @@ public class SolrClientCache implements Serializable { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - private Map solrClients = new HashMap(); + private final Map solrClients = new HashMap<>(); public synchronized CloudSolrClient getCloudSolrClient(String zkHost) { - CloudSolrClient client = null; + CloudSolrClient client; if (solrClients.containsKey(zkHost)) { client = (CloudSolrClient) solrClients.get(zkHost); } else { @@ -53,7 +53,7 @@ public class SolrClientCache implements Serializable { } public synchronized HttpSolrClient getHttpSolrClient(String host) { - HttpSolrClient client = null; + HttpSolrClient client; if (solrClients.containsKey(host)) { client = (HttpSolrClient) solrClients.get(host); } else { @@ -64,12 +64,11 @@ public class SolrClientCache implements Serializable { } public void close() { - Iterator it = solrClients.values().iterator(); - while(it.hasNext()) { + for(Map.Entry entry : solrClients.entrySet()) { try { - it.next().close(); 
+ entry.getValue().close(); } catch (IOException e) { - log.error(e.getMessage(), e); + log.error("Error closing SolrClient for " + entry.getKey(), e); } } } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java index 9f9c00ec9ec..95105c8e33f 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java @@ -45,17 +45,17 @@ class ConnectionImpl implements Connection { private final String url; private final SolrClientCache solrClientCache = new SolrClientCache(); private final CloudSolrClient client; - private final String collection; private final Properties properties; private final DatabaseMetaData databaseMetaData; private final Statement connectionStatement; + private String collection; private boolean closed; private SQLWarning currentWarning; ConnectionImpl(String url, String zkHost, String collection, Properties properties) throws SQLException { this.url = url; - this.client = solrClientCache.getCloudSolrClient(zkHost); - this.collection = collection; + this.client = this.solrClientCache.getCloudSolrClient(zkHost); + this.setSchema(collection); this.properties = properties; this.connectionStatement = createStatement(); this.databaseMetaData = new DatabaseMetaDataImpl(this, this.connectionStatement); @@ -158,7 +158,7 @@ class ConnectionImpl implements Connection { @Override public void setCatalog(String catalog) throws SQLException { - throw new UnsupportedOperationException(); + } @Override @@ -301,7 +301,7 @@ class ConnectionImpl implements Connection { @Override public boolean isValid(int timeout) throws SQLException { - // check that the connection isn't close and able to connect within the timeout + // check that the connection isn't closed and able to connect within the timeout try { if(!isClosed()) { this.client.connect(timeout, 
TimeUnit.SECONDS); @@ -345,7 +345,7 @@ class ConnectionImpl implements Connection { @Override public void setSchema(String schema) throws SQLException { - throw new UnsupportedOperationException(); + this.collection = schema; } @Override @@ -377,4 +377,4 @@ class ConnectionImpl implements Connection { public boolean isWrapperFor(Class iface) throws SQLException { throw new UnsupportedOperationException(); } -} \ No newline at end of file +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java index 33cd94e94ce..62f2a1869d8 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java @@ -16,12 +16,22 @@ */ package org.apache.solr.client.solrj.io.sql; +import java.io.IOException; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; import java.sql.Statement; +import java.util.Set; + +import org.apache.solr.client.solrj.SolrClient; +import org.apache.solr.client.solrj.SolrQuery; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.impl.CloudSolrClient; +import org.apache.solr.client.solrj.impl.HttpSolrClient; +import org.apache.solr.client.solrj.response.QueryResponse; +import org.apache.solr.common.util.SimpleOrderedMap; class DatabaseMetaDataImpl implements DatabaseMetaData { private final ConnectionImpl connection; @@ -32,6 +42,21 @@ class DatabaseMetaDataImpl implements DatabaseMetaData { this.connectionStatement = connectionStatement; } + private int getVersionPart(String version, int part) { + // TODO Is there a better way to do this? Reuse code from elsewhere? + // Gets the parts of the Solr version. If fail then just return 0. 
+ if (version != null) { + try { + String[] versionParts = version.split("\\.", 3); + return Integer.parseInt(versionParts[part]); + } catch (Throwable e) { + return 0; + } + } else { + return 0; + } + } + @Override public boolean allProceduresAreCallable() throws SQLException { return false; @@ -79,32 +104,78 @@ class DatabaseMetaDataImpl implements DatabaseMetaData { @Override public String getDatabaseProductName() throws SQLException { - return null; + return "Apache Solr"; } @Override public String getDatabaseProductVersion() throws SQLException { - return null; + // Returns the version for the first live node in the Solr cluster. + SolrQuery sysQuery = new SolrQuery(); + sysQuery.setRequestHandler("/admin/info/system"); + + CloudSolrClient cloudSolrClient = this.connection.getClient(); + Set liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes(); + SolrClient solrClient = null; + for (String node : liveNodes) { + try { + String nodeURL = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(node); + solrClient = new HttpSolrClient(nodeURL); + + QueryResponse rsp = solrClient.query(sysQuery); + return String.valueOf(((SimpleOrderedMap) rsp.getResponse().get("lucene")).get("solr-spec-version")); + } catch (SolrServerException | IOException ignore) { + return ""; + } finally { + if (solrClient != null) { + try { + solrClient.close(); + } catch (IOException ignore) { + // Don't worry about failing to close the Solr client + } + } + } + } + + // If no version found just return empty string + return ""; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return getVersionPart(this.getDatabaseProductVersion(), 0); + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return getVersionPart(this.getDatabaseProductVersion(), 1); } @Override public String getDriverName() throws SQLException { - return null; + return this.getClass().getPackage().getSpecificationTitle(); } @Override public 
String getDriverVersion() throws SQLException { - return null; + return this.getClass().getPackage().getSpecificationVersion(); } @Override public int getDriverMajorVersion() { - return 0; + try { + return getVersionPart(this.getDriverVersion(), 0); + } catch (SQLException e) { + return 0; + } } @Override public int getDriverMinorVersion() { - return 0; + try { + return getVersionPart(this.getDriverVersion(), 1); + } catch (SQLException e) { + return 0; + } } @Override @@ -822,19 +893,9 @@ class DatabaseMetaDataImpl implements DatabaseMetaData { return 0; } - @Override - public int getDatabaseMajorVersion() throws SQLException { - return 0; - } - - @Override - public int getDatabaseMinorVersion() throws SQLException { - return 0; - } - @Override public int getJDBCMajorVersion() throws SQLException { - return 0; + return 4; } @Override diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java index 82ae02e776f..ba2111404d3 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java @@ -381,7 +381,13 @@ public class JdbcTest extends AbstractFullDistribZkTestBase { private void testJDBCMethods(String collection, String connectionString, Properties properties, String sql) throws Exception { try (Connection con = DriverManager.getConnection(connectionString, properties)) { assertTrue(con.isValid(DEFAULT_CONNECTION_TIMEOUT)); + assertEquals(zkServer.getZkAddress(), con.getCatalog()); + con.setCatalog(zkServer.getZkAddress()); + assertEquals(zkServer.getZkAddress(), con.getCatalog()); + + assertEquals(collection, con.getSchema()); + con.setSchema(collection); assertEquals(collection, con.getSchema()); DatabaseMetaData databaseMetaData = con.getMetaData(); @@ -390,6 +396,21 @@ public class JdbcTest extends AbstractFullDistribZkTestBase { assertEquals(con, 
databaseMetaData.getConnection()); assertEquals(connectionString, databaseMetaData.getURL()); + assertEquals(4, databaseMetaData.getJDBCMajorVersion()); + assertEquals(0, databaseMetaData.getJDBCMinorVersion()); + + assertEquals("Apache Solr", databaseMetaData.getDatabaseProductName()); + + // The following tests require package information that is not available when running via Maven +// assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDatabaseProductVersion()); +// assertEquals(0, databaseMetaData.getDatabaseMajorVersion()); +// assertEquals(0, databaseMetaData.getDatabaseMinorVersion()); + +// assertEquals(this.getClass().getPackage().getSpecificationTitle(), databaseMetaData.getDriverName()); +// assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDriverVersion()); +// assertEquals(0, databaseMetaData.getDriverMajorVersion()); +// assertEquals(0, databaseMetaData.getDriverMinorVersion()); + try(ResultSet rs = databaseMetaData.getCatalogs()) { assertTrue(rs.next()); assertEquals(zkServer.getZkAddress(), rs.getString("TABLE_CAT")); From ba20faa9557cdd56d6a696d702fb5a62d9d43f74 Mon Sep 17 00:00:00 2001 From: jbernste Date: Sat, 6 Feb 2016 21:03:45 -0500 Subject: [PATCH 08/17] SOLR-8652: Implement Statement.setMaxRows() --- .../apache/solr/client/solrj/io/sql/StatementImpl.java | 9 +++++++-- .../org/apache/solr/client/solrj/io/sql/JdbcTest.java | 5 +++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java index 77cebfe6149..1927029c78b 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java @@ -45,6 +45,7 @@ class StatementImpl implements Statement { private String currentSQL; private ResultSetImpl currentResultSet; 
private SQLWarning currentWarning; + private int maxRows; StatementImpl(ConnectionImpl connection) { this.connection = connection; @@ -58,6 +59,10 @@ class StatementImpl implements Statement { this.currentResultSet = null; } + if(maxRows > 0 && !(sql.toLowerCase()).contains("limit")) { + sql = sql + " limit "+Integer.toString(maxRows); + } + closed = false; // If closed reopen so Statement can be reused. this.currentResultSet = new ResultSetImpl(this, constructStream(sql)); return this.currentResultSet; @@ -132,12 +137,12 @@ class StatementImpl implements Statement { @Override public int getMaxRows() throws SQLException { - throw new UnsupportedOperationException(); + return this.maxRows; } @Override public void setMaxRows(int max) throws SQLException { - throw new UnsupportedOperationException(); + this.maxRows = max; } @Override diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java index ba2111404d3..b3d7ae92fc1 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java @@ -166,7 +166,8 @@ public class JdbcTest extends AbstractFullDistribZkTestBase { stmt.close(); //Test statement reuse - rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc limit 2"); + stmt.setMaxRows(2); + rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc"); assert(rs.next()); assert(rs.getLong("a_i") == 0); assert(rs.getLong(2) == 0); @@ -176,7 +177,7 @@ public class JdbcTest extends AbstractFullDistribZkTestBase { assert(!rs.next()); stmt.close(); - //Test simple loop + //Test simple loop. 
Since limit is set it will override the statement maxRows rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc limit 100"); int count = 0; while(rs.next()) { From 75a81795b8c8370cc754b60801fc33cc020efb30 Mon Sep 17 00:00:00 2001 From: jbernste Date: Sat, 6 Feb 2016 21:25:05 -0500 Subject: [PATCH 09/17] SOLR-8652: Check if second to last token is limit to test for limit clause --- .../apache/solr/client/solrj/io/sql/StatementImpl.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java index 1927029c78b..e236c8368f1 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.HashMap; import java.util.Random; @@ -59,7 +60,7 @@ class StatementImpl implements Statement { this.currentResultSet = null; } - if(maxRows > 0 && !(sql.toLowerCase()).contains("limit")) { + if(maxRows > 0 && !containsLimit(sql)) { sql = sql + " limit "+Integer.toString(maxRows); } @@ -356,4 +357,10 @@ class StatementImpl implements Statement { public boolean isWrapperFor(Class iface) throws SQLException { throw new UnsupportedOperationException(); } + + private boolean containsLimit(String sql) { + String[] tokens = sql.split("\\s+"); + String secondToLastToken = tokens[tokens.length-2]; + return ("limit").equals(secondToLastToken); + } } \ No newline at end of file From 3d47612b04dae27080a5960088aa5e7303f14874 Mon Sep 17 00:00:00 2001 From: jbernste Date: Sat, 6 Feb 2016 21:42:01 -0500 Subject: [PATCH 10/17] SOLR-8652: Lower case the limit clause --- 
.../org/apache/solr/client/solrj/io/sql/StatementImpl.java | 2 +- .../test/org/apache/solr/client/solrj/io/sql/JdbcTest.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java index e236c8368f1..f8598313025 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java @@ -361,6 +361,6 @@ class StatementImpl implements Statement { private boolean containsLimit(String sql) { String[] tokens = sql.split("\\s+"); String secondToLastToken = tokens[tokens.length-2]; - return ("limit").equals(secondToLastToken); + return ("limit").equals(secondToLastToken.toLowerCase(Locale.getDefault())); } } \ No newline at end of file diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java index b3d7ae92fc1..6d9c51acfc4 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java @@ -177,8 +177,8 @@ public class JdbcTest extends AbstractFullDistribZkTestBase { assert(!rs.next()); stmt.close(); - //Test simple loop. Since limit is set it will override the statement maxRows - rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc limit 100"); + //Test simple loop. Since limit is set it will override the statement maxRows. 
+ rs = stmt.executeQuery("select id, a_i, a_s, a_f from collection1 order by a_i asc LIMIT 100"); int count = 0; while(rs.next()) { ++count; From c2eb76941eb604f6cdf7873cab9ea02175c69000 Mon Sep 17 00:00:00 2001 From: jbernste Date: Sat, 6 Feb 2016 21:49:39 -0500 Subject: [PATCH 11/17] SOLR-8502: Improve Solr JDBC Driver to support SQL Clients like DBVisualizer --- solr/CHANGES.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 06e24d04045..93a844792ad 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -144,6 +144,8 @@ New Features * SOLR-8285: Ensure the /export handler works with NULL field values (Joel Bernstein) +* SOLR-8502: Improve Solr JDBC Driver to support SQL Clients like DBVisualizer (Kevin Risden, Joel Bernstein) + Bug Fixes ---------------------- * SOLR-8386: Add field option in the new admin UI schema page loads up even when no schemaFactory has been From 2a7687641e03d586688dcdad419674032e8f7a01 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sun, 7 Feb 2016 05:24:19 -0500 Subject: [PATCH 12/17] add verbosity for this test when it fails --- .../org/apache/lucene/store/BaseLockFactoryTestCase.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java index 595efee4d95..f2c72685552 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java @@ -210,6 +210,7 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { @Override public void run() { IndexWriter writer = null; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); for(int i=0;i Date: Sun, 7 Feb 2016 09:19:57 -0500 Subject: [PATCH 14/17] MDW's assertNoUnreferencedFilesOnClose should still run even if 
checkIndexOnClose is turned off --- .../lucene/store/MockDirectoryWrapper.java | 115 +++++++++--------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java index 51dbffedd39..68bacbd9357 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java @@ -722,80 +722,81 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks, cause); } - if (getCheckIndexOnClose()) { - randomIOExceptionRate = 0.0; - randomIOExceptionRateOnOpen = 0.0; + randomIOExceptionRate = 0.0; + randomIOExceptionRateOnOpen = 0.0; + + if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) && DirectoryReader.indexExists(this)) { + if (LuceneTestCase.VERBOSE) { + System.out.println("\nNOTE: MockDirectoryWrapper: now crush"); + } + if (getCheckIndexOnClose()) { - if (DirectoryReader.indexExists(this)) { - if (LuceneTestCase.VERBOSE) { - System.out.println("\nNOTE: MockDirectoryWrapper: now crush"); - } crash(); // corrupt any unsynced-files if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex"); } TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose(), true); + } - // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles - if (assertNoUnreferencedFilesOnClose) { + // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles + if (assertNoUnreferencedFilesOnClose) { - // now look for unreferenced files: discount ones that we tried to delete but could not - Set allFiles = new HashSet<>(Arrays.asList(listAll())); - String[] startFiles = allFiles.toArray(new String[0]); - IndexWriterConfig iwc = new 
IndexWriterConfig(null); - iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); + // now look for unreferenced files: discount ones that we tried to delete but could not + Set allFiles = new HashSet<>(Arrays.asList(listAll())); + String[] startFiles = allFiles.toArray(new String[0]); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); - // We must do this before opening writer otherwise writer will be angry if there are pending deletions: - TestUtil.disableVirusChecker(in); + // We must do this before opening writer otherwise writer will be angry if there are pending deletions: + TestUtil.disableVirusChecker(in); - new IndexWriter(in, iwc).rollback(); - String[] endFiles = in.listAll(); + new IndexWriter(in, iwc).rollback(); + String[] endFiles = in.listAll(); - Set startSet = new TreeSet<>(Arrays.asList(startFiles)); - Set endSet = new TreeSet<>(Arrays.asList(endFiles)); + Set startSet = new TreeSet<>(Arrays.asList(startFiles)); + Set endSet = new TreeSet<>(Arrays.asList(endFiles)); - startFiles = startSet.toArray(new String[0]); - endFiles = endSet.toArray(new String[0]); + startFiles = startSet.toArray(new String[0]); + endFiles = endSet.toArray(new String[0]); - if (!Arrays.equals(startFiles, endFiles)) { - List removed = new ArrayList<>(); - for(String fileName : startFiles) { - if (!endSet.contains(fileName)) { - removed.add(fileName); - } + if (!Arrays.equals(startFiles, endFiles)) { + List removed = new ArrayList<>(); + for(String fileName : startFiles) { + if (!endSet.contains(fileName)) { + removed.add(fileName); } - - List added = new ArrayList<>(); - for(String fileName : endFiles) { - if (!startSet.contains(fileName)) { - added.add(fileName); - } - } - - String extras; - if (removed.size() != 0) { - extras = "\n\nThese files were removed: " + removed; - } else { - extras = ""; - } - - if (added.size() != 0) { - extras += "\n\nThese files were added (waaaaaaaaaat!): " + added; - } - - throw 
new RuntimeException("unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras); } - - DirectoryReader ir1 = DirectoryReader.open(this); - int numDocs1 = ir1.numDocs(); - ir1.close(); - new IndexWriter(this, new IndexWriterConfig(null)).close(); - DirectoryReader ir2 = DirectoryReader.open(this); - int numDocs2 = ir2.numDocs(); - ir2.close(); - assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2; + + List added = new ArrayList<>(); + for(String fileName : endFiles) { + if (!startSet.contains(fileName)) { + added.add(fileName); + } + } + + String extras; + if (removed.size() != 0) { + extras = "\n\nThese files were removed: " + removed; + } else { + extras = ""; + } + + if (added.size() != 0) { + extras += "\n\nThese files were added (waaaaaaaaaat!): " + added; + } + + throw new RuntimeException("unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras); } + + DirectoryReader ir1 = DirectoryReader.open(this); + int numDocs1 = ir1.numDocs(); + ir1.close(); + new IndexWriter(this, new IndexWriterConfig(null)).close(); + DirectoryReader ir2 = DirectoryReader.open(this); + int numDocs2 = ir2.numDocs(); + ir2.close(); + assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2; } } success = true; From e0ba1e5bffb304dbb1196782851cf143ec995e97 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sun, 7 Feb 2016 09:26:23 -0500 Subject: [PATCH 15/17] move verbosity to the right place --- .../java/org/apache/lucene/store/MockDirectoryWrapper.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java index 68bacbd9357..515f012ea7d 
100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java @@ -726,11 +726,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { randomIOExceptionRateOnOpen = 0.0; if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) && DirectoryReader.indexExists(this)) { - if (LuceneTestCase.VERBOSE) { - System.out.println("\nNOTE: MockDirectoryWrapper: now crush"); - } if (getCheckIndexOnClose()) { + if (LuceneTestCase.VERBOSE) { + System.out.println("\nNOTE: MockDirectoryWrapper: now crush"); + } crash(); // corrupt any unsynced-files if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex"); From f8bd22e58c953a5ef27fd4859c91845755ebd490 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sun, 7 Feb 2016 13:21:39 -0500 Subject: [PATCH 16/17] LUCENE-6835, LUCENE-6684: move the 'suppress NSFE on windows' hack down lower, out of IFD into FSDir; also fix IFD to remove segments files before others --- .../apache/lucene/index/IndexFileDeleter.java | 27 ++++--- .../org/apache/lucene/store/FSDirectory.java | 15 +++- .../lucene/index/TestIndexFileDeleter.java | 76 +++++++++++++++++++ 3 files changed, 100 insertions(+), 18 deletions(-) diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java index 6886055fb6c..52f0b401aff 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java @@ -696,24 +696,23 @@ final class IndexFileDeleter implements Closeable { ensureOpen(); if (infoStream.isEnabled("IFD")) { - infoStream.message("IFD", "delete \"" + names + "\""); + infoStream.message("IFD", "delete " + names + ""); + } + + // We make two passes, first deleting any segments_N files, second deleting the rest. 
We do this so that if we throw exc or JVM + // crashes during deletions, even when not on Windows, we don't leave the index in an "apparently corrupt" state: + for(String name : names) { + if (name.startsWith(IndexFileNames.SEGMENTS) == false) { + continue; + } + directory.deleteFile(name); } for(String name : names) { - try { - directory.deleteFile(name); - } catch (NoSuchFileException | FileNotFoundException e) { - // IndexWriter should only ask us to delete files it knows it wrote, so if we hit this, something is wrong! - - if (Constants.WINDOWS) { - // TODO: can we remove this OS-specific hacky logic? If windows deleteFile is buggy, we should instead contain this workaround in - // a WindowsFSDirectory ... - // LUCENE-6684: we suppress this assert for Windows, since a file could be in a confusing "pending delete" state, and falsely - // return NSFE/FNFE - } else { - throw e; - } + if (name.startsWith(IndexFileNames.SEGMENTS) == true) { + continue; } + directory.deleteFile(name); } } diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java index 32e078caa52..26e45531aa2 100644 --- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java @@ -327,7 +327,7 @@ public abstract class FSDirectory extends BaseDirectory { if (pendingDeletes.contains(name)) { throw new NoSuchFileException("file \"" + name + "\" is already pending delete"); } - privateDeleteFile(name); + privateDeleteFile(name, false); maybeDeletePendingFiles(); } @@ -347,7 +347,7 @@ public abstract class FSDirectory extends BaseDirectory { // Clone the set since we mutate it in privateDeleteFile: for(String name : new HashSet<>(pendingDeletes)) { - privateDeleteFile(name); + privateDeleteFile(name, true); } } } @@ -363,14 +363,21 @@ public abstract class FSDirectory extends BaseDirectory { } } - private void privateDeleteFile(String name) throws 
IOException { + private void privateDeleteFile(String name, boolean isPendingDelete) throws IOException { try { Files.delete(directory.resolve(name)); pendingDeletes.remove(name); } catch (NoSuchFileException | FileNotFoundException e) { // We were asked to delete a non-existent file: pendingDeletes.remove(name); + if (isPendingDelete && Constants.WINDOWS) { + // TODO: can we remove this OS-specific hacky logic? If windows deleteFile is buggy, we should instead contain this workaround in + // a WindowsFSDirectory ... + // LUCENE-6684: we suppress this check for Windows, since a file could be in a confusing "pending delete" state, failing the first + // delete attempt with access denied and then apparently falsely failing here when we try to delete it again, with NSFE/FNFE + } else { + throw e; + } } catch (IOException ioe) { // On windows, a file delete can fail because there's still an open // file handle against it. We record this in pendingDeletes and diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java index 3ea9da004b5..ecebb18c16e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java @@ -488,4 +488,80 @@ public class TestIndexFileDeleter extends LuceneTestCase { w.close(); dir.close(); } + + // LUCENE-6835: make sure best-effort to not create an "apparently but not really" corrupt index is working: + public void testExcInDeleteFile() throws Throwable { + int iters = atLeast(10); + for(int iter=0;iter Date: Sun, 7 Feb 2016 13:35:00 -0500 Subject: [PATCH 17/17] LUCENE-6750: add verbosity when this test fails --- .../lucene/TestMergeSchedulerExternal.java | 39 +++++++++++++------ 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java index 241e966c8e8..d4aaeb13fb6 100644 --- a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java +++ b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java @@ -17,6 +17,10 @@ package org.apache.lucene; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -24,16 +28,16 @@ import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LogMergePolicy; -import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergePolicy.OneMerge; +import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.MergeTrigger; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; - -import java.io.IOException; +import org.apache.lucene.util.PrintStreamInfoStream; /** * Holds tests cases to verify external APIs are accessible @@ -93,11 +97,16 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { Document doc = new Document(); Field idField = newStringField("id", "", Field.Store.YES); doc.add(idField); - - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())) - .setMergeScheduler(new MyMergeScheduler()) - .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH) - .setMergePolicy(newLogMergePolicy())); + + IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())) + .setMergeScheduler(new MyMergeScheduler()) + 
.setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH) + .setMergePolicy(newLogMergePolicy()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + iwc.setInfoStream(new PrintStreamInfoStream(new PrintStream(baos, true, IOUtils.UTF_8))); + + IndexWriter writer = new IndexWriter(dir, iwc); LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy(); logMP.setMergeFactor(10); for(int i=0;i<20;i++) { @@ -110,10 +119,16 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { // OK } writer.rollback(); - - assertTrue(mergeThreadCreated); - assertTrue(mergeCalled); - assertTrue(excCalled); + + try { + assertTrue(mergeThreadCreated); + assertTrue(mergeCalled); + assertTrue(excCalled); + } catch (AssertionError ae) { + System.out.println("TEST FAILED; IW infoStream output:"); + System.out.println(baos.toString(IOUtils.UTF_8)); + throw ae; + } dir.close(); }