SOLR-821 -- Add support for replication to copy a conf file to the slave under a different name. This allows replication of solrconfig.xml

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@727319 13f79535-47bb-0310-9956-ffa450edef68
Author: Shalin Shekhar Mangar
Date: 2008-12-17 09:19:26 +00:00
parent 89557b26aa
commit aa3a9b6927
9 changed files with 312 additions and 1612 deletions
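For context, the feature added here is the name:alias syntax in the confFiles value parsed by ReplicationHandler below. A sketch of how it would be configured on the master, with illustrative file names (solrconfig_slave.xml is a hypothetical master-side copy of the config the slave should run with):

    <requestHandler name="/replication" class="solr.ReplicationHandler">
      <lst name="master">
        <str name="replicateAfter">commit</str>
        <!-- schema.xml is copied as-is; solrconfig_slave.xml is saved
             on the slave under the alias solrconfig.xml -->
        <str name="confFiles">schema.xml,solrconfig_slave.xml:solrconfig.xml</str>
      </lst>
    </requestHandler>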

File: CHANGES.txt

@ -108,6 +108,10 @@ New Features
    also deprecated and replaced with "count" and "lex".
    (Lars Kotthoff via yonik)
+23. SOLR-821: Add support for replication to copy conf file to slave with a different name. This allows replication
+    of solrconfig.xml
+    (Noble Paul, Akshay Ukey via shalin)
Optimizations
----------------------
 1. SOLR-374: Use IndexReader.reopen to save resources by re-using parts of the

File: ReplicationHandler.java (org.apache.solr.handler)

@ -16,7 +16,6 @@
 */
package org.apache.solr.handler;
-import org.apache.commons.httpclient.HttpClient;
import org.apache.lucene.index.IndexCommit;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
@ -48,27 +47,16 @@ import java.util.zip.Checksum;
import java.util.zip.DeflaterOutputStream;
/**
- * <p> A Handler which provides a REST API for replication and serves replication requests from Slaves.
- * <p/>
- * </p>
- * <p>When running on the master, it provides the following commands
- * <ol>
- * <li>Get the current replicatable index version (command=indexversion)</li>
- * <li>Get the list of files for a given index version (command=filelist&amp;indexversion=&lt;VERSION&gt;)</li>
- * <li>Get full or a part (chunk) of a given index or a config file (command=filecontent&amp;file=&lt;FILE_NAME&gt;)
- * You can optionally specify an offset and length to get that chunk of the file.
- * You can request a configuration file by using "cf" parameter instead of the "file" parameter.</li>
- * <li>Get status/statistics (command=details)</li>
- * </ol>
- * </p>
- * <p>When running on the slave, it provides the following commands
- * <ol>
- * <li>Perform a snap pull now (command=snappull)</li>
- * <li>Get status/statistics (command=details)</li>
- * <li>Abort a snap pull (command=abort)</li>
- * <li>Enable/Disable polling the master for new versions (command=enablepoll or command=disablepoll)</li>
- * </ol>
- * </p>
+ * <p> A Handler which provides a REST API for replication and serves replication requests from Slaves. <p/> </p>
+ * <p>When running on the master, it provides the following commands <ol> <li>Get the current replicatable index version
+ * (command=indexversion)</li> <li>Get the list of files for a given index version
+ * (command=filelist&amp;indexversion=&lt;VERSION&gt;)</li> <li>Get full or a part (chunk) of a given index or a config
+ * file (command=filecontent&amp;file=&lt;FILE_NAME&gt;) You can optionally specify an offset and length to get that
+ * chunk of the file. You can request a configuration file by using "cf" parameter instead of the "file" parameter.</li>
+ * <li>Get status/statistics (command=details)</li> </ol> </p> <p>When running on the slave, it provides the following
+ * commands <ol> <li>Perform a snap pull now (command=snappull)</li> <li>Get status/statistics (command=details)</li>
+ * <li>Abort a snap pull (command=abort)</li> <li>Enable/Disable polling the master for new versions (command=enablepoll
+ * or command=disablepoll)</li> </ol> </p>
 *
 * @version $Id$
 * @since solr 1.4
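As a usage sketch of the commands documented above (host, port, and the /replication handler path are illustrative; command and parameter names are those in the javadoc):

    http://master:8983/solr/replication?command=indexversion
    http://master:8983/solr/replication?command=filelist&indexversion=<VERSION>
    http://master:8983/solr/replication?command=filecontent&cf=<CONF_FILE_NAME>
    http://slave:8983/solr/replication?command=snappull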
@ -81,7 +69,9 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
private ReentrantLock snapPullLock = new ReentrantLock();
-private List<String> includeConfFiles;
+private String includeConfFiles;
+private NamedList<String> confFileNameAlias = new NamedList<String>();
private boolean isMaster = false;
@ -239,8 +229,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
/**
- * This method adds an Object of FileStream to the resposnse .
- * The FileStream implements a custom protocol which is understood by SnapPuller.FileFetcher
+ * This method adds an Object of FileStream to the resposnse . The FileStream implements a custom protocol which is
+ * understood by SnapPuller.FileFetcher
 *
 * @see org.apache.solr.handler.SnapPuller.FileFetcher
 */
@ -281,27 +271,28 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
+ version, e);
}
rsp.add(CMD_GET_FILE_LIST, result);
-if (includeConfFiles == null)
+if (confFileNameAlias.size() < 1)
return;
LOG.debug("Adding config files to list: " + includeConfFiles);
//if configuration files need to be included get their details
-List<Map<String, Object>> confFiles = getConfFileCache(includeConfFiles);
-rsp.add(CONF_FILES, confFiles);
+rsp.add(CONF_FILES, getConfFileInfoFromCache(confFileNameAlias, confFileInfoCache));
}
/**
- * For configuration files, checksum of the file is included
- * because, unlike index files, they may have same content but different timestamps.
+ * For configuration files, checksum of the file is included because, unlike index files, they may have same content
+ * but different timestamps.
 * <p/>
- * The local conf files information is cached so that everytime it does not have to
- * compute the checksum. The cache is refreshed only if the lastModified of the file changes
+ * The local conf files information is cached so that everytime it does not have to compute the checksum. The cache is
+ * refreshed only if the lastModified of the file changes
 */
-List<Map<String, Object>> getConfFileCache(Collection<String> filenames) {
+List<Map<String, Object>> getConfFileInfoFromCache(NamedList<String> nameAndAlias,
+    final Map<String, FileInfo> confFileInfoCache) {
List<Map<String, Object>> confFiles = new ArrayList<Map<String, Object>>();
synchronized (confFileInfoCache) {
File confDir = new File(core.getResourceLoader().getConfigDir());
Checksum checksum = null;
-for (String cf : filenames) {
+for (int i = 0; i < nameAndAlias.size(); i++) {
+String cf = nameAndAlias.getName(i);
File f = new File(confDir, cf);
if (!f.exists() || f.isDirectory()) continue; //must not happen
FileInfo info = confFileInfoCache.get(cf);
@ -310,13 +301,15 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
info = new FileInfo(f.lastModified(), cf, f.length(), getCheckSum(checksum, f));
confFileInfoCache.put(cf, info);
}
-confFiles.add(info.getAsMap());
+Map<String, Object> m = info.getAsMap();
+if (nameAndAlias.getVal(i) != null) m.put(ALIAS, nameAndAlias.getVal(i));
+confFiles.add(m);
}
}
return confFiles;
}
-private static class FileInfo {
+static class FileInfo {
long lastmodified;
String name;
long size;
@ -329,7 +322,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
this.checksum = checksum;
}
-public Map<String, Object> getAsMap() {
+Map<String, Object> getAsMap() {
Map<String, Object> map = new HashMap<String, Object>();
map.put(NAME, name);
map.put(SIZE, size);
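For reference, each conf-file entry the master returns (and, via getFileList above, sends to the slave under confFiles) is the map built by getAsMap: name, size, and checksum, plus alias when one is configured, e.g. {name=solrconfig_slave.xml, size=..., checksum=..., alias=solrconfig.xml} (values and file name illustrative).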
@ -453,7 +446,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
list.add("isReplicating", String.valueOf(isReplicating())); list.add("isReplicating", String.valueOf(isReplicating()));
} }
if (isMaster) { if (isMaster) {
list.add("confFilesToReplicate", includeConfFiles.toString()); if (includeConfFiles != null)
list.add("confFilesToReplicate", includeConfFiles);
if (replicateOnCommit) if (replicateOnCommit)
list.add(REPLICATE_AFTER, "commit"); list.add(REPLICATE_AFTER, "commit");
if (replicateOnOptimize) if (replicateOnOptimize)
@ -611,6 +605,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
totalPercent = (bytesDownloaded * 100) / bytesToDownload;
if (timeElapsed > 0)
downloadSpeed = (bytesDownloaded / timeElapsed);
+if (currFile != null)
details.add("currentFile", currFile);
details.add("currentFileSize", readableSize(currFileSize));
details.add("currentFileSizeDownloaded", readableSize(currFileSizeDownloaded));
@ -627,7 +622,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
if (isMaster) {
-details.add(CONF_FILES, includeConfFiles.toString());
+if (includeConfFiles != null)
+details.add(CONF_FILES, includeConfFiles);
if (replicateOnCommit)
details.add(REPLICATE_AFTER, "commit");
if (replicateOnOptimize)
@ -649,9 +645,15 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
NamedList master = (NamedList) initArgs.get("master");
if (master != null) {
-String includeFiles = (String) master.get(CONF_FILES);
-if (includeFiles != null && !includeFiles.trim().equals("")) {
-includeConfFiles = Arrays.asList(includeFiles.split(","));
+includeConfFiles = (String) master.get(CONF_FILES);
+if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
+List<String> files = Arrays.asList(includeConfFiles.split(","));
+for (String file : files) {
+if (file.trim().length() == 0) continue;
+String[] strs = file.split(":");
+// if there is an alias add it or it is null
+confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
+}
LOG.info("Replication enabled for following config files: " + includeConfFiles);
}
List snapshot = master.getAll("snapshot");
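To illustrate the parsing above: a confFiles value of "schema.xml,solrconfig_slave.xml:solrconfig.xml" (file names illustrative) puts two entries into confFileNameAlias: schema.xml with a null alias, and solrconfig_slave.xml with the alias solrconfig.xml, the name the file will be saved under on the slave (see SnapPuller below).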
@ -701,9 +703,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
/**
- * A ResponseWriter is registered automatically for wt=filestream
- * This response writer is used to transfer index files in a block-by-block manner within
- * the same HTTP response.
+ * A ResponseWriter is registered automatically for wt=filestream This response writer is used to transfer index files
+ * in a block-by-block manner within the same HTTP response.
 */
private void registerFileStreamResponseWriter() {
core.registerResponseWriter(FILE_STREAM, new BinaryQueryResponseWriter() {
@ -730,6 +731,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
 *
 * @param snapshoot do a snapshoot
 * @param getCommit get a commitpoint also
+ *
 * @return an instance of the eventlistener
 */
private SolrEventListener getEventListener(final boolean snapshoot, final boolean getCommit) {
@ -912,6 +914,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
public static final String CHECKSUM = "checksum";
+public static final String ALIAS = "alias";
public static final String CONF_CHECKSUM = "confchecksum";
public static final String CONF_FILES = "confFiles";

File: SnapPuller.java (org.apache.solr.handler)

@ -48,9 +48,8 @@ import java.util.zip.GZIPInputStream;
import java.util.zip.InflaterInputStream;
/**
- * <p/> Provides functionality equivalent to the snappull script as well as a
- * timer for scheduling pulls from the master.
- * </p>
+ * <p/> Provides functionality equivalent to the snappull script as well as a timer for scheduling pulls from the
+ * master. </p>
 *
 * @version $Id$
 * @since solr 1.4
@ -189,12 +188,13 @@ public class SnapPuller {
}
/**
- * This command downloads all the necessary files from master to install a
- * index commit point. Only changed files are downloaded. It also downloads the
- * conf files (if they are modified).
+ * This command downloads all the necessary files from master to install a index commit point. Only changed files are
+ * downloaded. It also downloads the conf files (if they are modified).
 *
 * @param core the SolrCore
+ *
 * @return true on success, false if slave is already in sync
+ *
 * @throws IOException if an exception occurs
 */
@SuppressWarnings("unchecked")
@ -290,8 +290,8 @@ public class SnapPuller {
}
/**
- * Helper method to record the last replication's details so that we can show them on the
- * statistics page across restarts.
+ * Helper method to record the last replication's details so that we can show them on the statistics page across
+ * restarts.
 */
private void logReplicationTimeAndConfFiles(Collection<Map<String, Object>> modifiedConfFiles) {
FileOutputStream outFile = null;
@ -392,7 +392,8 @@ public class SnapPuller {
"Failed to create temporary config folder: " + tmpconfDir.getName()); "Failed to create temporary config folder: " + tmpconfDir.getName());
} }
for (Map<String, Object> file : confFilesToDownload) { for (Map<String, Object> file : confFilesToDownload) {
fileFetcher = new FileFetcher(tmpconfDir, file, (String) file.get(NAME), true, latestVersion); String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
fileFetcher = new FileFetcher(tmpconfDir, file, saveAs, true, latestVersion);
currentFile = file; currentFile = file;
fileFetcher.fetchFile(); fileFetcher.fetchFile();
confFilesDownloaded.add(new HashMap<String, Object>(file)); confFilesDownloaded.add(new HashMap<String, Object>(file));
@ -422,8 +423,8 @@ public class SnapPuller {
}
/**
- * All the files which are common between master and slave must have
- * same timestamp and size else we assume they are not compatible (stale).
+ * All the files which are common between master and slave must have same timestamp and size else we assume they are
+ * not compatible (stale).
 *
 * @return true if the index stale and we need to download a fresh copy, false otherwise.
 */
@ -442,8 +443,7 @@ public class SnapPuller {
}
/**
- * Copy a file by the File#renameTo() method. If it fails, it is considered
- * a failure
+ * Copy a file by the File#renameTo() method. If it fails, it is considered a failure
 * <p/>
 * Todo may be we should try a simple copy if it fails
 */
@ -466,8 +466,7 @@ public class SnapPuller {
}
/**
- * Copy all index files from the temp index dir to the actual index.
- * The segments_N file is copied last.
+ * Copy all index files from the temp index dir to the actual index. The segments_N file is copied last.
 */
private boolean copyIndexFiles(File snapDir, File indexDir) {
String segmentsFile = null;
@ -494,8 +493,7 @@ public class SnapPuller {
}
/**
- * The conf files are copied to the tmp dir to the conf dir.
- * A backup of the old file is maintained
+ * The conf files are copied to the tmp dir to the conf dir. A backup of the old file is maintained
 */
private void copyTmpConfFiles2Conf(File tmpconfDir) throws IOException {
File confDir = new File(solrCore.getResourceLoader().getConfigDir());
@ -556,26 +554,36 @@ public class SnapPuller {
}
}
+private final Map<String, FileInfo> confFileInfoCache = new HashMap<String, FileInfo>();
/**
- * The local conf files are compared with the conf files in the master. If they are
- * same (by checksum) do not copy.
+ * The local conf files are compared with the conf files in the master. If they are same (by checksum) do not copy.
+ *
+ * @param confFilesToDownload The list of files obtained from master
 *
 * @return a list of configuration files which have changed on the master and need to be downloaded.
 */
private Collection<Map<String, Object>> getModifiedConfFiles(List<Map<String, Object>> confFilesToDownload) {
if (confFilesToDownload == null || confFilesToDownload.isEmpty())
return Collections.EMPTY_LIST;
+//build a map with alias/name as the key
Map<String, Map<String, Object>> nameVsFile = new HashMap<String, Map<String, Object>>();
+NamedList names = new NamedList();
for (Map<String, Object> map : confFilesToDownload) {
-nameVsFile.put((String) map.get(NAME), map);
+//if alias is present that is the name the file may have in the slave
+String name = (String) (map.get(ALIAS) == null ? map.get(NAME) : map.get(ALIAS));
+nameVsFile.put(name, map);
+names.add(name, null);
}
-List<Map<String, Object>> localFilesInfo = replicationHandler.getConfFileCache(nameVsFile.keySet());
+//get the details of the local conf files with the same alias/name
+List<Map<String, Object>> localFilesInfo = replicationHandler.getConfFileInfoFromCache(names, confFileInfoCache);
+//compare their size/checksum to see if
for (Map<String, Object> fileInfo : localFilesInfo) {
String name = (String) fileInfo.get(NAME);
Map<String, Object> m = nameVsFile.get(name);
-if (m == null) continue;
+if (m == null) continue; // the file is not even present locally (so must be downloaded)
if (m.get(CHECKSUM).equals(fileInfo.get(CHECKSUM))) {
-nameVsFile.remove(name);
+nameVsFile.remove(name); //checksums are same so the file need not be downloaded
}
}
return nameVsFile.isEmpty() ? Collections.EMPTY_LIST : nameVsFile.values();
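A worked example of the comparison above (file name illustrative): if the master reports an entry with alias solrconfig.xml and checksum c1, the slave looks up its local conf/solrconfig.xml via getConfFileInfoFromCache; if the local checksum is also c1 the entry is removed from nameVsFile and not downloaded, otherwise it remains in the returned collection and is fetched.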
@ -685,8 +693,7 @@ public class SnapPuller {
}
/**
- * The class acts as a client for ReplicationHandler.FileStream.
- * It understands the protocol of wt=filestream
+ * The class acts as a client for ReplicationHandler.FileStream. It understands the protocol of wt=filestream
 *
 * @see org.apache.solr.handler.ReplicationHandler.FileStream
 */
@ -826,9 +833,8 @@ public class SnapPuller {
}
/**
- * The webcontainer flushes the data only after it fills the buffer size.
- * So, all data has to be read as readFully() other wise it fails. So read
- * everything as bytes and then extract an integer out of it
+ * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be read as readFully()
+ * other wise it fails. So read everything as bytes and then extract an integer out of it
 */
private int readInt(byte[] b) {
return (((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16)
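As a worked example (the remaining two shifts are cut off in the hunk above but follow the same big-endian pattern): readInt on the bytes {0, 0, 1, 2} yields (1 << 8) | 2 = 258.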
@ -858,7 +864,8 @@ public class SnapPuller {
}
try {
post.releaseConnection();
-} catch (Exception e) {}
+} catch (Exception e) {
+}
if (bytesDownloaded != size) {
//if the download is not complete then
//delete the file being downloaded

File: TestReplicationHandler.java (org.apache.solr.handler)

@ -122,7 +122,7 @@ public class TestReplicationHandler extends TestCase {
//add 500 docs to master
for (int i = 0; i < 500; i++)
-index(masterClient, "id", i, "name", "name = " + String.valueOf(i));
+index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
@ -130,9 +130,9 @@ public class TestReplicationHandler extends TestCase {
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
-//sleep for pollinterval time, 4s for letting slave to pull data.
-Thread.sleep(4000);
-//get docs from slave and check equal to master
+//sleep for pollinterval time 4s, to let slave pull data.
+Thread.sleep(3000);
+//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
@ -145,9 +145,10 @@ public class TestReplicationHandler extends TestCase {
masterClient.deleteByQuery("*:*");
masterClient.commit();
-copyFile(new File("." + System.getProperty("file.separator") +
-"solr" + System.getProperty("file.separator") +
-"conf" + System.getProperty("file.separator") + "schema-replication2.xml"),
+//change the schema on master
+copyFile(new File("." + File.separator +
+"solr" + File.separator +
+"conf" + File.separator + "schema-replication2.xml"),
new File(master.getConfDir(), "schema.xml"));
masterJetty.stop();
@ -156,11 +157,11 @@ public class TestReplicationHandler extends TestCase {
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
-index(masterClient, "id", "2000", "name", "name = " + String.valueOf(2000), "newname", "newname = " + String.valueOf(2000));
+index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 4s for replication to happen.
-Thread.sleep(4000);
+Thread.sleep(3000);
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
@ -168,6 +169,73 @@ public class TestReplicationHandler extends TestCase {
}
public void testIndexAndConfigAliasReplication() throws Exception {
//add 500 docs to master
for (int i = 0; i < 500; i++)
index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
NamedList masterQueryRsp = query("*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(500, masterQueryResult.getNumFound());
//sleep for pollinterval time 3s, to let slave pull data.
Thread.sleep(3000);
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = query("*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(500, slaveQueryResult.getNumFound());
//compare results
String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
//start config files replication test
//clear master index
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change solrconfig on master
copyFile(new File("." + File.separator +
"solr" + File.separator +
"conf" + File.separator + "solrconfig-master1.xml"),
new File(master.getConfDir(), "solrconfig.xml"));
//change schema on master
copyFile(new File("." + File.separator +
"solr" + File.separator +
"conf" + File.separator + "schema-replication2.xml"),
new File(master.getConfDir(), "schema.xml"));
//keep a copy of the new schema
copyFile(new File("." + File.separator +
"solr" + File.separator +
"conf" + File.separator + "schema-replication2.xml"),
new File(master.getConfDir(), "schema-replication2.xml"));
masterJetty.stop();
masterJetty = createJetty(master, 9999);
masterClient = createNewSolrServer(masterJetty.getLocalPort());
//add a doc with new field and commit on master to trigger snappull from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
//sleep for 3s for replication to happen.
Thread.sleep(3000);
index(slaveClient, "id", "2000", "name", "name = " + 2001, "newname", "newname = " + 2001);
slaveClient.commit();
slaveQueryRsp = query("*:*", slaveClient);
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2001", (String) d.getFieldValue("newname"));
}
void copyFile(File src, File dst) throws IOException { void copyFile(File src, File dst) throws IOException {
InputStream in = new FileInputStream(src); InputStream in = new FileInputStream(src);
OutputStream out = new FileOutputStream(dst); OutputStream out = new FileOutputStream(dst);
@ -193,29 +261,30 @@ public class TestReplicationHandler extends TestCase {
}
public String getHomeDir() {
-return homeDir.toString() + System.getProperty("file.separator");
+return homeDir.toString();
}
@Override
public String getSchemaFile() {
-return "." + System.getProperty("file.separator") + "solr" + System.getProperty("file.separator") + "conf" + System.getProperty("file.separator") + "schema-replication1.xml";
+return "." + File.separator + "solr" + File.separator + "conf" + File.separator + "schema-replication1.xml";
}
public String getConfDir() {
-return confDir.toString() + System.getProperty("file.separator");
+return confDir.toString();
}
public String getDataDir() {
-return dataDir.toString() + System.getProperty("file.separator");
+return dataDir.toString();
}
@Override
public String getSolrConfigFile() {
String fname = "";
if (type == 1)
-fname = "." + System.getProperty("file.separator") + "solr" + System.getProperty("file.separator") + "conf" + System.getProperty("file.separator") + "solrconfig-master.xml";
+fname = "." + File.separator + "solr" + File.separator + "conf" + File.separator + "solrconfig-master.xml";
if (type == 0)
-fname = "." + System.getProperty("file.separator") + "solr" + System.getProperty("file.separator") + "conf" + System.getProperty("file.separator") + "solrconfig-slave.xml";
+fname = "." + File.separator + "solr" + File.separator + "conf" + File.separator + "solrconfig-slave.xml";
+System.out.println(fname);
return fname;
}
@ -224,18 +293,18 @@ public class TestReplicationHandler extends TestCase {
System.setProperty("solr.test.sys.prop2", "proptwo"); System.setProperty("solr.test.sys.prop2", "proptwo");
String home = System.getProperty("java.io.tmpdir") String home = System.getProperty("java.io.tmpdir")
+ System.getProperty("file.separator") + File.separator
+ getClass().getName() + "-" + System.currentTimeMillis() + System.getProperty("file.separator"); + getClass().getName() + "-" + System.currentTimeMillis();
if (type == 1) { if (type == 1) {
homeDir = new File(home + "master" + System.getProperty("file.separator")); homeDir = new File(home + "master");
dataDir = new File(home + "master" + System.getProperty("file.separator") + "data" + System.getProperty("file.separator")); dataDir = new File(home + "master", "data");
confDir = new File(home + "master" + System.getProperty("file.separator") + "conf" + System.getProperty("file.separator")); confDir = new File(home + "master", "conf");
} }
if (type == 0) { if (type == 0) {
homeDir = new File(home + "slave" + System.getProperty("file.separator")); homeDir = new File(home + "slave");
dataDir = new File(home + "slave" + System.getProperty("file.separator") + "data" + System.getProperty("file.separator")); dataDir = new File(home + "slave", "data");
confDir = new File(home + "slave" + System.getProperty("file.separator") + "conf" + System.getProperty("file.separator")); confDir = new File(home + "slave", "conf");
} }
homeDir.mkdirs(); homeDir.mkdirs();
@ -254,6 +323,3 @@ public class TestReplicationHandler extends TestCase {
}
}
}

File: test schema (likely schema-replication1.xml, per the test setup above)

@ -31,432 +31,19 @@
<schema name="test" version="1.0"> <schema name="test" version="1.0">
<types> <types>
<!-- field type definitions... note that the "name" attribute is
just a label to be used by field definitions. The "class"
attribute and any other attributes determine the real type and
behavior of the fieldtype.
-->
<!-- numeric field types that store and index the text
value verbatim (and hence don't sort correctly or support range queries.)
These are provided more for backward compatability, allowing one
to create a schema that matches an existing lucene index.
-->
<fieldType name="integer" class="solr.IntField"/> <fieldType name="integer" class="solr.IntField"/>
<fieldType name="long" class="solr.LongField"/>
<fieldtype name="float" class="solr.FloatField"/>
<fieldType name="double" class="solr.DoubleField"/>
<!-- numeric field types that manipulate the value into
a string value that isn't human readable in it's internal form,
but sorts correctly and supports range queries.
If sortMissingLast="true" then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order.
If sortMissingFirst="true" then a sort on this field will cause documents
without the field to come before documents with the field,
regardless of the requested sort order.
If sortMissingLast="false" and sortMissingFirst="false" (the default),
then default lucene sorting will be used which places docs without the field
first in an ascending sort and last in a descending sort.
-->
<fieldtype name="sint" class="solr.SortableIntField" sortMissingLast="true"/>
<fieldtype name="slong" class="solr.SortableLongField" sortMissingLast="true"/>
<fieldtype name="sfloat" class="solr.SortableFloatField" sortMissingLast="true"/>
<fieldtype name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true"/>
<!-- bcd versions of sortable numeric type may provide smaller
storage space and support very large numbers.
-->
<fieldtype name="bcdint" class="solr.BCDIntField" sortMissingLast="true"/>
<fieldtype name="bcdlong" class="solr.BCDLongField" sortMissingLast="true"/>
<fieldtype name="bcdstr" class="solr.BCDStrField" sortMissingLast="true"/>
<!-- Field type demonstrating an Analyzer failure -->
<fieldtype name="failtype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- Demonstrating ignoreCaseChange -->
<fieldtype name="wdf_nocase" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="wdf_preserve" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- HighlitText optimizes storage for (long) columns which will be highlit -->
<fieldtype name="highlittext" class="solr.TextField" compressThreshold="345" />
<fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldtype name="string" class="solr.StrField" sortMissingLast="true"/> <fieldtype name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
seconds part (.999) is optional.
-->
<fieldtype name="date" class="solr.DateField" sortMissingLast="true"/>
<!-- solr.TextField allows the specification of custom
text analyzers specified as a tokenizer and a list
of token filters.
-->
<fieldtype name="text" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<!-- lucene PorterStemFilterFactory deprecated
<filter class="solr.PorterStemFilterFactory"/>
-->
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="nametext" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.WhitespaceAnalyzer"/>
</fieldtype>
<fieldtype name="teststop" class="solr.TextField">
<analyzer>
<tokenizer class="solr.LowerCaseTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<!--filter class="solr.StopFilterFactory" words="stopwords.txt"/-->
</analyzer>
</fieldtype>
<!-- fieldtypes in this section isolate tokenizers and tokenfilters for testing -->
<fieldtype name="lowertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LowerCaseTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="keywordtok" class="solr.TextField">
<analyzer><tokenizer class="solr.KeywordTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.StandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="lettertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LetterTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="whitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.WhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLstandardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripStandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLwhitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripWhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtokfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="standardfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="lowerfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="patternreplacefilt" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.PatternReplaceFilterFactory"
pattern="([^a-zA-Z])" replacement="_" replace="all"
/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="porterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<!-- fieldtype name="snowballfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SnowballPorterFilterFactory"/>
</analyzer>
</fieldtype -->
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/-->
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true"/>
</analyzer>
</fieldtype>
<fieldtype name="custstopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.StopFilterFactory" words="stopwords.txt"/-->
</analyzer>
</fieldtype>
<fieldtype name="lengthfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LengthFilterFactory" min="2" max="5"/>
</analyzer>
</fieldtype>
<fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<!-- more flexible in matching skus, but more chance of a false match -->
<fieldtype name="skutype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- less flexible in matching skus, but less chance of a false match -->
<fieldtype name="skutype2" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- less flexible in matching skus, but less chance of a false match -->
<fieldtype name="syn" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter name="syn" class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/-->
</analyzer>
</fieldtype>
<!-- Demonstrates How RemoveDuplicatesTokenFilter makes stemmed
synonyms "better"
-->
<fieldtype name="dedup" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.SynonymFilterFactory"
synonyms="synonyms.txt" expand="true" /-->
<filter class="solr.EnglishPorterFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
</analyzer>
</fieldtype>
<fieldtype name="unstored" class="solr.StrField" indexed="true" stored="false"/>
<fieldtype name="textgap" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
</types>
<fields>
<field name="id" type="integer" indexed="true" stored="true" multiValued="false" required="false"/>
-<field name="name" type="nametext" indexed="true" stored="true"/>
+<field name="name" type="string" indexed="true" stored="true"/>
<field name="text" type="text" indexed="true" stored="false"/>
<field name="subject" type="text" indexed="true" stored="true"/>
<field name="title" type="nametext" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="bday" type="date" indexed="true" stored="true"/>
<field name="title_stemmed" type="text" indexed="true" stored="false"/>
<field name="title_lettertok" type="lettertok" indexed="true" stored="false"/>
<field name="syn" type="syn" indexed="true" stored="true"/>
<!-- to test property inheritance and overriding -->
<field name="shouldbeunstored" type="unstored" />
<field name="shouldbestored" type="unstored" stored="true"/>
<field name="shouldbeunindexed" type="unstored" indexed="false" stored="true"/>
<!-- test different combinations of indexed and stored -->
<field name="bind" type="boolean" indexed="true" stored="false"/>
<field name="bsto" type="boolean" indexed="false" stored="true"/>
<field name="bindsto" type="boolean" indexed="true" stored="true"/>
<field name="isto" type="integer" indexed="false" stored="true"/>
<field name="iind" type="integer" indexed="true" stored="false"/>
<field name="ssto" type="string" indexed="false" stored="true"/>
<field name="sind" type="string" indexed="true" stored="false"/>
<field name="sindsto" type="string" indexed="true" stored="true"/>
<!-- test combinations of term vector settings -->
<field name="test_basictv" type="text" termVectors="true"/>
<field name="test_notv" type="text" termVectors="false"/>
<field name="test_postv" type="text" termVectors="true" termPositions="true"/>
<field name="test_offtv" type="text" termVectors="true" termOffsets="true"/>
<field name="test_posofftv" type="text" termVectors="true"
termPositions="true" termOffsets="true"/>
<!-- test highlit field settings -->
<field name="test_hlt" type="highlittext" indexed="true" compressed="true"/>
<field name="test_hlt_off" type="highlittext" indexed="true" compressed="false"/>
<!-- fields to test individual tokenizers and tokenfilters -->
<field name="teststop" type="teststop" indexed="true" stored="true"/>
<field name="lowertok" type="lowertok" indexed="true" stored="true"/>
<field name="keywordtok" type="keywordtok" indexed="true" stored="true"/>
<field name="standardtok" type="standardtok" indexed="true" stored="true"/>
<field name="HTMLstandardtok" type="HTMLstandardtok" indexed="true" stored="true"/>
<field name="lettertok" type="lettertok" indexed="true" stored="true"/>
<field name="whitetok" type="whitetok" indexed="true" stored="true"/>
<field name="HTMLwhitetok" type="HTMLwhitetok" indexed="true" stored="true"/>
<field name="standardtokfilt" type="standardtokfilt" indexed="true" stored="true"/>
<field name="standardfilt" type="standardfilt" indexed="true" stored="true"/>
<field name="lowerfilt" type="lowerfilt" indexed="true" stored="true"/>
<field name="patternreplacefilt" type="patternreplacefilt" indexed="true" stored="true"/>
<field name="porterfilt" type="porterfilt" indexed="true" stored="true"/>
<field name="engporterfilt" type="engporterfilt" indexed="true" stored="true"/>
<field name="custengporterfilt" type="custengporterfilt" indexed="true" stored="true"/>
<field name="stopfilt" type="stopfilt" indexed="true" stored="true"/>
<field name="custstopfilt" type="custstopfilt" indexed="true" stored="true"/>
<field name="lengthfilt" type="lengthfilt" indexed="true" stored="true"/>
<field name="dedup" type="dedup" indexed="true" stored="true"/>
<field name="wdf_nocase" type="wdf_nocase" indexed="true" stored="true"/>
<field name="wdf_preserve" type="wdf_preserve" indexed="true" stored="true"/>
<field name="numberpartfail" type="failtype1" indexed="true" stored="true"/>
<field name="nullfirst" type="string" indexed="true" stored="true" sortMissingFirst="true"/>
<field name="subword" type="subword" indexed="true" stored="true"/>
<field name="sku1" type="skutype1" indexed="true" stored="true"/>
<field name="sku2" type="skutype2" indexed="true" stored="true"/>
<field name="textgap" type="textgap" indexed="true" stored="true"/>
<field name="timestamp" type="date" indexed="true" stored="true" default="NOW" multiValued="false"/>
<field name="multiDefault" type="string" indexed="true" stored="true" default="muLti-Default" multiValued="true"/>
<field name="intDefault" type="sint" indexed="true" stored="true" default="42" multiValued="false"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
a "*" only at the start or the end.
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. if equal size patterns
both match, the first appearing in the schema will be used.
-->
<dynamicField name="*_i" type="sint" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_l" type="slong" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="sfloat" indexed="true" stored="true"/>
<dynamicField name="*_d" type="sdouble" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_bcd" type="bcdstr" indexed="true" stored="true"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="t_*" type="text" indexed="true" stored="true"/>
<dynamicField name="tv_*" type="text" indexed="true" stored="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<!-- special fields for dynamic copyField test -->
<dynamicField name="dynamic_*" type="string" indexed="true" stored="true"/>
<dynamicField name="*_dynamic" type="string" indexed="true" stored="true"/>
<!-- for testing to ensure that longer patterns are matched first -->
<dynamicField name="*aa" type="string" indexed="true" stored="true"/>
<dynamicField name="*aaa" type="integer" indexed="false" stored="true"/>
<!-- ignored becuase not stored or indexed -->
<dynamicField name="*_ignored" type="text" indexed="false" stored="false"/>
</fields>
<defaultSearchField>text</defaultSearchField>
<uniqueKey>id</uniqueKey>
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field different
ways, or to add multiple fields to the same field for easier/faster searching.
-->
<copyField source="title" dest="title_stemmed"/>
<copyField source="title" dest="title_lettertok"/>
<copyField source="title" dest="text"/>
<copyField source="subject" dest="text"/>
<copyField source="*_t" dest="text"/>
<!-- dynamic destination -->
<copyField source="*_dynamic" dest="dynamic_*"/>
<!-- Similarity is the scoring routine for each document vs a query.
A custom similarity may be specified here, but the default is fine
for most applications.
-->
<similarity class="org.apache.solr.schema.CustomSimilarityFactory">
<str name="echo">is there an echo?</str>
</similarity>
</schema>

File: test schema (likely schema-replication2.xml, per the test setup above)

@ -31,433 +31,22 @@
<schema name="test" version="1.0"> <schema name="test" version="1.0">
<types> <types>
<!-- field type definitions... note that the "name" attribute is
just a label to be used by field definitions. The "class"
attribute and any other attributes determine the real type and
behavior of the fieldtype.
-->
<!-- numeric field types that store and index the text
value verbatim (and hence don't sort correctly or support range queries.)
These are provided more for backward compatability, allowing one
to create a schema that matches an existing lucene index.
-->
<fieldType name="integer" class="solr.IntField"/> <fieldType name="integer" class="solr.IntField"/>
<fieldType name="long" class="solr.LongField"/>
<fieldtype name="float" class="solr.FloatField"/>
<fieldType name="double" class="solr.DoubleField"/>
<!-- numeric field types that manipulate the value into
a string value that isn't human readable in it's internal form,
but sorts correctly and supports range queries.
If sortMissingLast="true" then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order.
If sortMissingFirst="true" then a sort on this field will cause documents
without the field to come before documents with the field,
regardless of the requested sort order.
If sortMissingLast="false" and sortMissingFirst="false" (the default),
then default lucene sorting will be used which places docs without the field
first in an ascending sort and last in a descending sort.
-->
<fieldtype name="sint" class="solr.SortableIntField" sortMissingLast="true"/>
<fieldtype name="slong" class="solr.SortableLongField" sortMissingLast="true"/>
<fieldtype name="sfloat" class="solr.SortableFloatField" sortMissingLast="true"/>
<fieldtype name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true"/>
<!-- bcd versions of sortable numeric type may provide smaller
storage space and support very large numbers.
-->
<fieldtype name="bcdint" class="solr.BCDIntField" sortMissingLast="true"/>
<fieldtype name="bcdlong" class="solr.BCDLongField" sortMissingLast="true"/>
<fieldtype name="bcdstr" class="solr.BCDStrField" sortMissingLast="true"/>
<!-- Field type demonstrating an Analyzer failure -->
<fieldtype name="failtype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- Demonstrating ignoreCaseChange -->
<fieldtype name="wdf_nocase" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="wdf_preserve" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- HighlitText optimizes storage for (long) columns which will be highlit -->
<fieldtype name="highlittext" class="solr.TextField" compressThreshold="345" />
<fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldtype name="string" class="solr.StrField" sortMissingLast="true"/> <fieldtype name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
seconds part (.999) is optional.
-->
<fieldtype name="date" class="solr.DateField" sortMissingLast="true"/>
<!-- solr.TextField allows the specification of custom
text analyzers specified as a tokenizer and a list
of token filters.
-->
<fieldtype name="text" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<!-- lucene PorterStemFilterFactory deprecated
<filter class="solr.PorterStemFilterFactory"/>
-->
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="nametext" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.WhitespaceAnalyzer"/>
</fieldtype>
<fieldtype name="teststop" class="solr.TextField">
<analyzer>
<tokenizer class="solr.LowerCaseTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<!--filter class="solr.StopFilterFactory" words="stopwords.txt"/-->
</analyzer>
</fieldtype>
<!-- fieldtypes in this section isolate tokenizers and tokenfilters for testing -->
<fieldtype name="lowertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LowerCaseTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="keywordtok" class="solr.TextField">
<analyzer><tokenizer class="solr.KeywordTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.StandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="lettertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LetterTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="whitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.WhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLstandardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripStandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLwhitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripWhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtokfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="standardfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="lowerfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="patternreplacefilt" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.PatternReplaceFilterFactory"
pattern="([^a-zA-Z])" replacement="_" replace="all"
/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="porterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<!-- fieldtype name="snowballfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SnowballPorterFilterFactory"/>
</analyzer>
</fieldtype -->
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/-->
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true"/>
</analyzer>
</fieldtype>
<fieldtype name="custstopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.StopFilterFactory" words="stopwords.txt"/-->
</analyzer>
</fieldtype>
<fieldtype name="lengthfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LengthFilterFactory" min="2" max="5"/>
</analyzer>
</fieldtype>
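<!-- A sketch, assuming LengthFilter semantics: with min="2" and max="5",
whitespace-split tokens outside that length range are dropped, so
"a turtle hops" keeps only "hops". -->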
<fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
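<!-- A sketch of the WordDelimiterFilter settings above, assuming its documented
behavior: at index time "Wi-Fi" yields "wi", "fi" and, because
catenateWords="1", the catenated "wifi"; at query time catenateWords="0",
so only "wi" and "fi" are produced. -->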
<!-- more flexible in matching skus, but more chance of a false match -->
<fieldtype name="skutype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- less flexible in matching skus, but less chance of a false match -->
<fieldtype name="skutype2" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- a type for testing synonyms; the synonym filter itself is commented out -->
<fieldtype name="syn" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter name="syn" class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/-->
</analyzer>
</fieldtype>
<!-- Demonstrates how RemoveDuplicatesTokenFilter makes stemmed
synonyms "better"
-->
<fieldtype name="dedup" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--filter class="solr.SynonymFilterFactory"
synonyms="synonyms.txt" expand="true" /-->
<filter class="solr.EnglishPorterFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
</analyzer>
</fieldtype>
<fieldtype name="unstored" class="solr.StrField" indexed="true" stored="false"/>
<fieldtype name="textgap" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
</types>
<fields>
<field name="id" type="integer" indexed="true" stored="true" multiValued="false" required="false"/>
<field name="name" type="nametext" indexed="true" stored="true"/> <field name="name" type="string" indexed="true" stored="true"/>
<field name="text" type="text" indexed="true" stored="false"/>
<field name="subject" type="text" indexed="true" stored="true"/>
<field name="title" type="nametext" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="bday" type="date" indexed="true" stored="true"/>
<field name="title_stemmed" type="text" indexed="true" stored="false"/>
<field name="title_lettertok" type="lettertok" indexed="true" stored="false"/>
<field name="syn" type="syn" indexed="true" stored="true"/>
<!-- to test property inheritance and overriding -->
<field name="shouldbeunstored" type="unstored" />
<field name="shouldbestored" type="unstored" stored="true"/>
<field name="shouldbeunindexed" type="unstored" indexed="false" stored="true"/>
<!-- test different combinations of indexed and stored -->
<field name="bind" type="boolean" indexed="true" stored="false"/>
<field name="bsto" type="boolean" indexed="false" stored="true"/>
<field name="bindsto" type="boolean" indexed="true" stored="true"/>
<field name="isto" type="integer" indexed="false" stored="true"/>
<field name="iind" type="integer" indexed="true" stored="false"/>
<field name="ssto" type="string" indexed="false" stored="true"/>
<field name="sind" type="string" indexed="true" stored="false"/>
<field name="sindsto" type="string" indexed="true" stored="true"/>
<!-- test combinations of term vector settings -->
<field name="test_basictv" type="text" termVectors="true"/>
<field name="test_notv" type="text" termVectors="false"/>
<field name="test_postv" type="text" termVectors="true" termPositions="true"/>
<field name="test_offtv" type="text" termVectors="true" termOffsets="true"/>
<field name="test_posofftv" type="text" termVectors="true"
termPositions="true" termOffsets="true"/>
<!-- test highlit field settings -->
<field name="test_hlt" type="highlittext" indexed="true" compressed="true"/>
<field name="test_hlt_off" type="highlittext" indexed="true" compressed="false"/>
<!-- fields to test individual tokenizers and tokenfilters -->
<field name="teststop" type="teststop" indexed="true" stored="true"/>
<field name="lowertok" type="lowertok" indexed="true" stored="true"/>
<field name="keywordtok" type="keywordtok" indexed="true" stored="true"/>
<field name="standardtok" type="standardtok" indexed="true" stored="true"/>
<field name="HTMLstandardtok" type="HTMLstandardtok" indexed="true" stored="true"/>
<field name="lettertok" type="lettertok" indexed="true" stored="true"/>
<field name="whitetok" type="whitetok" indexed="true" stored="true"/>
<field name="HTMLwhitetok" type="HTMLwhitetok" indexed="true" stored="true"/>
<field name="standardtokfilt" type="standardtokfilt" indexed="true" stored="true"/>
<field name="standardfilt" type="standardfilt" indexed="true" stored="true"/>
<field name="lowerfilt" type="lowerfilt" indexed="true" stored="true"/>
<field name="patternreplacefilt" type="patternreplacefilt" indexed="true" stored="true"/>
<field name="porterfilt" type="porterfilt" indexed="true" stored="true"/>
<field name="engporterfilt" type="engporterfilt" indexed="true" stored="true"/>
<field name="custengporterfilt" type="custengporterfilt" indexed="true" stored="true"/>
<field name="stopfilt" type="stopfilt" indexed="true" stored="true"/>
<field name="custstopfilt" type="custstopfilt" indexed="true" stored="true"/>
<field name="lengthfilt" type="lengthfilt" indexed="true" stored="true"/>
<field name="dedup" type="dedup" indexed="true" stored="true"/>
<field name="wdf_nocase" type="wdf_nocase" indexed="true" stored="true"/>
<field name="wdf_preserve" type="wdf_preserve" indexed="true" stored="true"/>
<field name="numberpartfail" type="failtype1" indexed="true" stored="true"/>
<field name="nullfirst" type="string" indexed="true" stored="true" sortMissingFirst="true"/>
<field name="subword" type="subword" indexed="true" stored="true"/>
<field name="sku1" type="skutype1" indexed="true" stored="true"/>
<field name="sku2" type="skutype2" indexed="true" stored="true"/>
<field name="textgap" type="textgap" indexed="true" stored="true"/>
<field name="timestamp" type="date" indexed="true" stored="true" default="NOW" multiValued="false"/>
<field name="multiDefault" type="string" indexed="true" stored="true" default="muLti-Default" multiValued="true"/>
<field name="intDefault" type="sint" indexed="true" stored="true" default="42" multiValued="false"/>
<!--adding new field newname in replication test.-->
<field name="newname" type="nametext" indexed="true" stored="true"/> <field name="newname" type="string" indexed="true" stored="true"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
a "*" only at the start or the end.
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. If equal-size patterns
both match, the first appearing in the schema will be used.
-->
<dynamicField name="*_i" type="sint" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_l" type="slong" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="sfloat" indexed="true" stored="true"/>
<dynamicField name="*_d" type="sdouble" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_bcd" type="bcdstr" indexed="true" stored="true"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="t_*" type="text" indexed="true" stored="true"/>
<dynamicField name="tv_*" type="text" indexed="true" stored="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<!-- special fields for dynamic copyField test -->
<dynamicField name="dynamic_*" type="string" indexed="true" stored="true"/>
<dynamicField name="*_dynamic" type="string" indexed="true" stored="true"/>
<!-- for testing to ensure that longer patterns are matched first -->
<dynamicField name="*aa" type="string" indexed="true" stored="true"/>
<dynamicField name="*aaa" type="integer" indexed="false" stored="true"/>
<!-- ignored because not stored or indexed -->
<dynamicField name="*_ignored" type="text" indexed="false" stored="false"/>
</fields>
<defaultSearchField>text</defaultSearchField>
<uniqueKey>id</uniqueKey>
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field in
different ways, or to combine multiple fields into a single field for
easier/faster searching.
-->
<copyField source="title" dest="title_stemmed"/>
<copyField source="title" dest="title_lettertok"/>
<copyField source="title" dest="text"/>
<copyField source="subject" dest="text"/>
<copyField source="*_t" dest="text"/>
<!-- dynamic destination -->
<copyField source="*_dynamic" dest="dynamic_*"/>
<!-- Similarity is the scoring routine for each document vs a query.
A custom similarity may be specified here, but the default is fine
for most applications.
-->
<similarity class="org.apache.solr.schema.CustomSimilarityFactory">
<str name="echo">is there an echo?</str>
</similarity>
</schema>
View File
@ -24,61 +24,19 @@
<config>
<jmx />
<!-- Used to specify an alternate directory to hold all index data.
It defaults to "index" if not present, and should probably
not be changed if replication is in use. -->
<dataDir>${solr.data.dir:./solr/data}</dataDir>
<indexDefaults>
<!-- Values here affect all index writers and act as a default unless overridden. -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<!-- Tell Lucene when to flush documents to disk.
Giving Lucene more memory for indexing means faster indexing at the cost of more RAM.
If both ramBufferSizeMB and maxBufferedDocs are set, Lucene will flush based on whichever limit is hit first.
-->
<!--<maxBufferedDocs>1000</maxBufferedDocs>-->
<ramBufferSizeMB>32</ramBufferSizeMB>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
<!--
Expert: Turn on Lucene's auto commit capability.
NOTE: Despite the name, this value does not have any relation to Solr's autoCommit functionality
-->
<luceneAutoCommit>false</luceneAutoCommit>
<!--
Expert:
The Merge Policy in Lucene controls how merging is handled. The default in Lucene 2.3 is LogByteSizeMergePolicy,
which chooses segments to merge based on their size; previous versions used the Lucene 2.2 default,
LogDocMergePolicy, which chose merges based on the number of documents.
Other implementations of MergePolicy must have a no-argument constructor.
-->
<mergePolicy>org.apache.lucene.index.LogByteSizeMergePolicy</mergePolicy>
<!--
Expert:
The Merge Scheduler in Lucene controls how merges are performed. The ConcurrentMergeScheduler (Lucene 2.3 default)
can perform merges in the background using separate threads. The SerialMergeScheduler (Lucene 2.2 default) does not.
-->
<mergeScheduler>org.apache.lucene.index.ConcurrentMergeScheduler</mergeScheduler>
<!-- these are global... can't currently override per index -->
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
@ -86,7 +44,6 @@
</indexDefaults>
<mainIndex>
<!-- lucene options specific to the main on-disk lucene index -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<ramBufferSizeMB>32</ramBufferSizeMB>
@ -97,201 +54,18 @@
</mainIndex>
<updateHandler class="solr.DirectUpdateHandler2">
<!-- autocommit pending docs if certain criteria are met
<autoCommit>
<maxDocs>10000</maxDocs>
<maxTime>3600000</maxTime>
</autoCommit>
-->
<!-- represents a lower bound on the frequency that commits may
occur (in seconds). NOTE: not yet implemented
<commitIntervalLowerBound>0</commitIntervalLowerBound>
-->
<!-- The RunExecutableListener executes an external command.
exe - the name of the executable to run
dir - dir to use as the current working directory. default="."
wait - the calling thread waits until the executable returns. default="true"
args - the arguments to pass to the program. default=nothing
env - environment variables to set. default=nothing
-->
<!-- A postCommit event is fired after every commit
<listener event="postCommit" class="solr.RunExecutableListener">
<str name="exe">/var/opt/resin3/__PORT__/scripts/solr/snapshooter</str>
<str name="dir">/var/opt/resin3/__PORT__</str>
<bool name="wait">true</bool>
<arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
<arr name="env"> <str>MYVAR=val1</str> </arr>
</listener>
-->
</updateHandler>
<query>
<!-- Maximum number of clauses in a boolean query... can affect
range or wildcard queries that expand to big boolean
queries. An exception is thrown if exceeded.
-->
<maxBooleanClauses>1024</maxBooleanClauses>
<!-- Cache specification for Filters or DocSets - unordered set of *all* documents
that match a particular query.
-->
<filterCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="256"/>
<queryResultCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="1024"/>
<documentCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- If true, stored fields that are not requested will be loaded lazily.
-->
<enableLazyFieldLoading>true</enableLazyFieldLoading>
<!--
<cache name="myUserCache"
class="solr.search.LRUCache"
size="4096"
initialSize="1024"
autowarmCount="1024"
regenerator="MyRegenerator"
/>
-->
<useFilterForSortedQuery>true</useFilterForSortedQuery>
<queryResultWindowSize>10</queryResultWindowSize>
<!-- set maxSize artificially low to exercise both types of sets -->
<HashDocSet maxSize="3" loadFactor="0.75"/>
<!-- boolToFilterOptimizer converts boolean clauses with zero boost
into cached filters if the number of docs selected by the clause exceeds
the threshold (represented as a fraction of the total index)
-->
<boolTofilterOptimizer enabled="false" cacheSize="32" threshold=".05"/>
<!-- a newSearcher event is fired whenever a new searcher is being prepared
and there is a current searcher handling requests (aka registered). -->
<!-- QuerySenderListener takes an array of NamedList and executes a
local query request for each NamedList in sequence. -->
<!--
<listener event="newSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">solr</str> <str name="start">0</str> <str name="rows">10</str> </lst>
<lst> <str name="q">rocks</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
<!-- a firstSearcher event is fired whenever a new searcher is being
prepared but there is no current registered searcher to handle
requests or to gain prewarming data from. -->
<!--
<listener event="firstSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">fast_warm</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
</query>
<!-- An alternate set representation that uses an integer hash to store filters (sets of docids).
If the set cardinality is <= maxSize elements, a HashDocSet will be used instead of the
bitset-based representation. -->
<!-- requestHandler plugins... incoming queries will be dispatched to the
correct handler based on the qt (query type) param matching the
name of registered handlers.
The "standard" request handler is the default and will be used if qt
is not specified in the request.
-->
<requestHandler name="standard" class="solr.StandardRequestHandler"> <requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool> <bool name="httpCaching">true</bool>
</requestHandler> </requestHandler>
<requestHandler name="dismaxOldStyleDefaults"
class="solr.DisMaxRequestHandler" >
<!-- for historic reasons, DisMaxRequestHandler will use all of
its init params as "defaults" if there is no "defaults" list
specified
-->
<float name="tie">0.01</float>
<str name="qf">
text^0.5 features_t^1.0 subject^1.4 title_stemmed^2.0
</str>
<str name="pf">
text^0.2 features_t^1.1 subject^1.4 title_stemmed^2.0 title^1.5
</str>
<str name="bf">
ord(weight)^0.5 recip(rord(iind),1,1000,1000)^0.3
</str>
<str name="mm">
3&lt;-1 5&lt;-2 6&lt;90%
</str>
<int name="ps">100</int>
</requestHandler>
<requestHandler name="/replication" class="solr.ReplicationHandler" > <requestHandler name="/replication" class="solr.ReplicationHandler">
<lst name="master"> <lst name="master">
<str name="replicateAfter">commit</str> <str name="replicateAfter">commit</str>
<str name="confFiles">schema.xml,stopwords.txt</str> <str name="confFiles">schema.xml</str>
</lst>
</requestHandler>
<requestHandler name="dismax" class="solr.DisMaxRequestHandler" >
<lst name="defaults">
<str name="q.alt">*:*</str>
<float name="tie">0.01</float>
<str name="qf">
text^0.5 features_t^1.0 subject^1.4 title_stemmed^2.0
</str>
<str name="pf">
text^0.2 features_t^1.1 subject^1.4 title_stemmed^2.0 title^1.5
</str>
<str name="bf">
ord(weight)^0.5 recip(rord(iind),1,1000,1000)^0.3
</str>
<str name="mm">
3&lt;-1 5&lt;-2 6&lt;90%
</str>
<int name="ps">100</int>
</lst>
</requestHandler>
<requestHandler name="old" class="solr.tst.OldRequestHandler" >
<int name="myparam">1000</int>
<float name="ratio">1.4142135</float>
<arr name="myarr"><int>1</int><int>2</int></arr>
<str>foo</str>
</requestHandler>
<requestHandler name="oldagain" class="solr.tst.OldRequestHandler" >
<lst name="lst1"> <str name="op">sqrt</str> <int name="val">2</int> </lst>
<lst name="lst2"> <str name="op">log</str> <float name="val">10</float> </lst>
</requestHandler>
<requestHandler name="test" class="solr.tst.TestRequestHandler" />
<!-- test query parameter defaults -->
<requestHandler name="defaults" class="solr.StandardRequestHandler">
@ -311,115 +85,14 @@
</lst>
</requestHandler>
<requestHandler name="/update" class="solr.XmlUpdateRequestHandler"/>
<requestHandler name="/update/csv" class="solr.CSVRequestHandler" startup="lazy">
<bool name="httpCaching">false</bool>
</requestHandler>
<!-- test elevation -->
<!--searchComponent name="elevate" class="org.apache.solr.handler.component.QueryElevationComponent" >
<str name="queryFieldType">string</str>
<str name="config-file">elevate.xml</str>
</searchComponent-->
<requestHandler name="/elevate" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
</lst>
<arr name="last-components">
<!--str>elevate</str-->
</arr>
</requestHandler>
<searchComponent name="spellcheck" class="org.apache.solr.handler.component.SpellCheckComponent">
<str name="queryAnalyzerFieldType">lowerfilt</str>
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">lowerfilt</str>
<str name="spellcheckIndexDir">spellchecker1</str>
<str name="buildOnCommit">true</str>
</lst>
<!-- Example of using different distance measure -->
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">lowerfilt</str>
<!-- Use a different Distance Measure -->
<str name="distanceMeasure">org.apache.lucene.search.spell.JaroWinklerDistance</str>
<str name="spellcheckIndexDir">spellchecker2</str>
</lst>
<lst name="spellchecker">
<str name="classname">solr.FileBasedSpellChecker</str>
<str name="name">external</str>
<str name="sourceLocation">spellings.txt</str>
<str name="characterEncoding">UTF-8</str>
<str name="spellcheckIndexDir">spellchecker3</str>
</lst>
</searchComponent>
<!--
The SpellingQueryConverter converts raw (CommonParams.Q) queries into tokens. It uses a simple regular expression
to strip off field markup, boosts, ranges, etc., but it is not guaranteed to match an exact parse from the query parser.
-->
<queryConverter name="queryConverter" class="org.apache.solr.spelling.SpellingQueryConverter"/>
<requestHandler name="spellCheckCompRH" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<!-- omp = Only More Popular -->
<str name="spellcheck.onlyMorePopular">false</str>
<!-- exr = Extended Results -->
<str name="spellcheck.extendedResults">false</str>
<!-- The number of suggestions to return -->
<str name="spellcheck.count">1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler>
<highlighting>
<!-- Configure the standard fragmenter -->
<fragmenter name="gap" class="org.apache.solr.highlight.GapFragmenter" default="true">
<lst name="defaults">
<int name="hl.fragsize">100</int>
</lst>
</fragmenter>
<fragmenter name="regex" class="org.apache.solr.highlight.RegexFragmenter">
<lst name="defaults">
<int name="hl.fragsize">70</int>
</lst>
</fragmenter>
<!-- Configure the standard formatter -->
<formatter name="html" class="org.apache.solr.highlight.HtmlFormatter" default="true">
<lst name="defaults">
<str name="hl.simple.pre"><![CDATA[<em>]]></str>
<str name="hl.simple.post"><![CDATA[</em>]]></str>
</lst>
</formatter>
</highlighting>
<!-- enable streaming for testing... -->
<requestDispatcher handleSelect="true">
<requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
<httpCaching lastModifiedFrom="openTime" etagSeed="Solr" never304="false">
<cacheControl>max-age=30, public</cacheControl>
</httpCaching>
</requestDispatcher>
<admin>
<defaultQuery>solr</defaultQuery>
<gettableFiles>solrconfig.xml schema.xml admin-extra.html</gettableFiles>
</admin>
<!-- test getting system property -->
<propTest attr1="${solr.test.sys.prop1}-$${literal}"
attr2="${non.existent.sys.prop:default-from-config}">prefix-${solr.test.sys.prop2}-suffix</propTest>
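<!-- A sketch of how this resolves, assuming the substitution rules being tested:
with -Dsolr.test.sys.prop1=A and -Dsolr.test.sys.prop2=B, attr1 becomes
"A-${literal}" (the doubled $$ escapes substitution), attr2 falls back to
"default-from-config", and the element text becomes "prefix-B-suffix". -->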
<queryParser name="foo" class="FooQParserPlugin"/>
</config>
View File
@ -0,0 +1,98 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- $Id$
$Source$
$Name$
-->
<config>
<dataDir>${solr.data.dir:./solr/data}</dataDir>
<indexDefaults>
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<ramBufferSizeMB>32</ramBufferSizeMB>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
<luceneAutoCommit>false</luceneAutoCommit>
<mergeScheduler>org.apache.lucene.index.ConcurrentMergeScheduler</mergeScheduler>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
<lockType>single</lockType>
</indexDefaults>
<mainIndex>
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<ramBufferSizeMB>32</ramBufferSizeMB>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<unlockOnStartup>true</unlockOnStartup>
</mainIndex>
<updateHandler class="solr.DirectUpdateHandler2">
</updateHandler>
<requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool>
</requestHandler>
<requestHandler name="/replication" class="solr.ReplicationHandler">
<lst name="master">
<str name="replicateAfter">commit</str>
<str name="confFiles">schema-replication2.xml:schema.xml</str>
</lst>
</requestHandler>
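<!-- The source:target form above is the SOLR-821 feature this config exercises,
assuming the documented master-name:slave-name syntax: the master's
schema-replication2.xml is downloaded and saved on the slave as schema.xml.
A hypothetical mixed list would look like
<str name="confFiles">schema.xml,solrconfig_slave.xml:solrconfig.xml</str>
replicating schema.xml verbatim while renaming solrconfig_slave.xml, which is
what makes replicating solrconfig.xml itself possible. -->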
<!-- test query parameter defaults -->
<requestHandler name="defaults" class="solr.StandardRequestHandler">
<lst name="defaults">
<int name="rows">4</int>
<bool name="hl">true</bool>
<str name="hl.fl">text,name,subject,title,whitetok</str>
</lst>
</requestHandler>
<!-- test query parameter defaults -->
<requestHandler name="lazy" class="solr.StandardRequestHandler" startup="lazy">
<lst name="defaults">
<int name="rows">4</int>
<bool name="hl">true</bool>
<str name="hl.fl">text,name,subject,title,whitetok</str>
</lst>
</requestHandler>
<requestHandler name="/update" class="solr.XmlUpdateRequestHandler"/>
<!-- enable streaming for testing... -->
<requestDispatcher handleSelect="true">
<requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
<httpCaching lastModifiedFrom="openTime" etagSeed="Solr" never304="false">
<cacheControl>max-age=30, public</cacheControl>
</httpCaching>
</requestDispatcher>
</config>
View File
@ -24,61 +24,17 @@
<config>
<jmx />
<!-- Used to specify an alternate directory to hold all index data.
It defaults to "index" if not present, and should probably
not be changed if replication is in use. -->
<dataDir>${solr.data.dir:./solr/data}</dataDir>
<indexDefaults>
<!-- Values here affect all index writers and act as a default unless overridden. -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<!-- Tell Lucene when to flush documents to disk.
Giving Lucene more memory for indexing means faster indexing at the cost of more RAM.
If both ramBufferSizeMB and maxBufferedDocs are set, Lucene will flush based on whichever limit is hit first.
-->
<!--<maxBufferedDocs>1000</maxBufferedDocs>-->
<ramBufferSizeMB>32</ramBufferSizeMB>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
<!--
Expert: Turn on Lucene's auto commit capability.
NOTE: Despite the name, this value does not have any relation to Solr's autoCommit functionality
-->
<luceneAutoCommit>false</luceneAutoCommit>
<!--
Expert:
The Merge Policy in Lucene controls how merging is handled. The default in Lucene 2.3 is LogByteSizeMergePolicy,
which chooses segments to merge based on their size; previous versions used the Lucene 2.2 default,
LogDocMergePolicy, which chose merges based on the number of documents.
Other implementations of MergePolicy must have a no-argument constructor.
-->
<mergePolicy>org.apache.lucene.index.LogByteSizeMergePolicy</mergePolicy>
<!--
Expert:
The Merge Scheduler in Lucene controls how merges are performed. The ConcurrentMergeScheduler (Lucene 2.3 default)
can perform merges in the background using separate threads. The SerialMergeScheduler (Lucene 2.2 default) does not.
-->
<mergeScheduler>org.apache.lucene.index.ConcurrentMergeScheduler</mergeScheduler>
<!-- these are global... can't currently override per index -->
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
@ -86,7 +42,6 @@
</indexDefaults>
<mainIndex>
<!-- lucene options specific to the main on-disk lucene index -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<ramBufferSizeMB>32</ramBufferSizeMB>
@ -97,193 +52,11 @@
</mainIndex>
<updateHandler class="solr.DirectUpdateHandler2">
<!-- autocommit pending docs if certain criteria are met
<autoCommit>
<maxDocs>10000</maxDocs>
<maxTime>3600000</maxTime>
</autoCommit>
-->
<!-- represents a lower bound on the frequency that commits may
occur (in seconds). NOTE: not yet implemented
<commitIntervalLowerBound>0</commitIntervalLowerBound>
-->
<!-- The RunExecutableListener executes an external command.
exe - the name of the executable to run
dir - dir to use as the current working directory. default="."
wait - the calling thread waits until the executable returns. default="true"
args - the arguments to pass to the program. default=nothing
env - environment variables to set. default=nothing
-->
<!-- A postCommit event is fired after every commit
<listener event="postCommit" class="solr.RunExecutableListener">
<str name="exe">/var/opt/resin3/__PORT__/scripts/solr/snapshooter</str>
<str name="dir">/var/opt/resin3/__PORT__</str>
<bool name="wait">true</bool>
<arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
<arr name="env"> <str>MYVAR=val1</str> </arr>
</listener>
-->
</updateHandler>
<query>
<!-- Maximum number of clauses in a boolean query... can affect
range or wildcard queries that expand to big boolean
queries. An exception is thrown if exceeded.
-->
<maxBooleanClauses>1024</maxBooleanClauses>
<!-- Cache specification for Filters or DocSets - unordered set of *all* documents
that match a particular query.
-->
<filterCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="256"/>
<queryResultCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="1024"/>
<documentCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- If true, stored fields that are not requested will be loaded lazily.
-->
<enableLazyFieldLoading>true</enableLazyFieldLoading>
<!--
<cache name="myUserCache"
class="solr.search.LRUCache"
size="4096"
initialSize="1024"
autowarmCount="1024"
regenerator="MyRegenerator"
/>
-->
<useFilterForSortedQuery>true</useFilterForSortedQuery>
<queryResultWindowSize>10</queryResultWindowSize>
<!-- set maxSize artificially low to exercise both types of sets -->
<HashDocSet maxSize="3" loadFactor="0.75"/>
<!-- boolToFilterOptimizer converts boolean clauses with zero boost
into cached filters if the number of docs selected by the clause exceeds
the threshold (represented as a fraction of the total index)
-->
<boolTofilterOptimizer enabled="false" cacheSize="32" threshold=".05"/>
<!-- a newSearcher event is fired whenever a new searcher is being prepared
and there is a current searcher handling requests (aka registered). -->
<!-- QuerySenderListener takes an array of NamedList and executes a
local query request for each NamedList in sequence. -->
<!--
<listener event="newSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">solr</str> <str name="start">0</str> <str name="rows">10</str> </lst>
<lst> <str name="q">rocks</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
<!-- a firstSearcher event is fired whenever a new searcher is being
prepared but there is no current registered searcher to handle
requests or to gain prewarming data from. -->
<!--
<listener event="firstSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">fast_warm</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
</query>
<!-- An alternate set representation that uses an integer hash to store filters (sets of docids).
If the set cardinality is <= maxSize elements, a HashDocSet will be used instead of the
bitset-based representation. -->
<!-- requestHandler plugins... incoming queries will be dispatched to the
correct handler based on the qt (query type) param matching the
name of registered handlers.
The "standard" request handler is the default and will be used if qt
is not specified in the request.
-->
<requestHandler name="standard" class="solr.StandardRequestHandler"> <requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool> <bool name="httpCaching">true</bool>
</requestHandler> </requestHandler>
<requestHandler name="dismaxOldStyleDefaults"
class="solr.DisMaxRequestHandler" >
<!-- for historic reasons, DisMaxRequestHandler will use all of
its init params as "defaults" if there is no "defaults" list
specified
-->
<float name="tie">0.01</float>
<str name="qf">
text^0.5 features_t^1.0 subject^1.4 title_stemmed^2.0
</str>
<str name="pf">
text^0.2 features_t^1.1 subject^1.4 title_stemmed^2.0 title^1.5
</str>
<str name="bf">
ord(weight)^0.5 recip(rord(iind),1,1000,1000)^0.3
</str>
<str name="mm">
3&lt;-1 5&lt;-2 6&lt;90%
</str>
<int name="ps">100</int>
</requestHandler>
<requestHandler name="dismax" class="solr.DisMaxRequestHandler" >
<lst name="defaults">
<str name="q.alt">*:*</str>
<float name="tie">0.01</float>
<str name="qf">
text^0.5 features_t^1.0 subject^1.4 title_stemmed^2.0
</str>
<str name="pf">
text^0.2 features_t^1.1 subject^1.4 title_stemmed^2.0 title^1.5
</str>
<str name="bf">
ord(weight)^0.5 recip(rord(iind),1,1000,1000)^0.3
</str>
<str name="mm">
3&lt;-1 5&lt;-2 6&lt;90%
</str>
<int name="ps">100</int>
</lst>
</requestHandler>
<requestHandler name="old" class="solr.tst.OldRequestHandler" >
<int name="myparam">1000</int>
<float name="ratio">1.4142135</float>
<arr name="myarr"><int>1</int><int>2</int></arr>
<str>foo</str>
</requestHandler>
<requestHandler name="oldagain" class="solr.tst.OldRequestHandler" >
<lst name="lst1"> <str name="op">sqrt</str> <int name="val">2</int> </lst>
<lst name="lst2"> <str name="op">log</str> <float name="val">10</float> </lst>
</requestHandler>
<requestHandler name="test" class="solr.tst.TestRequestHandler" />
<!-- test query parameter defaults -->
<requestHandler name="defaults" class="solr.StandardRequestHandler">
@ -303,123 +76,22 @@
</lst>
</requestHandler>
<requestHandler name="/update" class="solr.XmlUpdateRequestHandler"/>
<requestHandler name="/update/csv" class="solr.CSVRequestHandler" startup="lazy">
<bool name="httpCaching">false</bool>
</requestHandler>
<!-- test elevation -->
<requestHandler name="/replication" class="solr.ReplicationHandler">
<!--searchComponent name="elevate" class="org.apache.solr.handler.component.QueryElevationComponent" >
<str name="queryFieldType">string</str>
<str name="config-file">elevate.xml</str>
</searchComponent-->
<requestHandler name="/elevate" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
</lst>
<arr name="last-components">
<!--str>elevate</str-->
</arr>
</requestHandler>
<searchComponent name="spellcheck" class="org.apache.solr.handler.component.SpellCheckComponent">
<str name="queryAnalyzerFieldType">lowerfilt</str>
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">lowerfilt</str>
<str name="spellcheckIndexDir">spellchecker1</str>
<str name="buildOnCommit">true</str>
</lst>
<!-- Example of using different distance measure -->
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">lowerfilt</str>
<!-- Use a different Distance Measure -->
<str name="distanceMeasure">org.apache.lucene.search.spell.JaroWinklerDistance</str>
<str name="spellcheckIndexDir">spellchecker2</str>
</lst>
<lst name="spellchecker">
<str name="classname">solr.FileBasedSpellChecker</str>
<str name="name">external</str>
<str name="sourceLocation">spellings.txt</str>
<str name="characterEncoding">UTF-8</str>
<str name="spellcheckIndexDir">spellchecker3</str>
</lst>
</searchComponent>
<!--
The SpellingQueryConverter converts raw (CommonParams.Q) queries into tokens. It uses a simple regular expression
to strip off field markup, boosts, ranges, etc., but it is not guaranteed to match an exact parse from the query parser.
-->
<queryConverter name="queryConverter" class="org.apache.solr.spelling.SpellingQueryConverter"/>
<requestHandler name="/replication" class="solr.ReplicationHandler" >
<lst name="slave"> <lst name="slave">
<str name="masterUrl">http://localhost:9999/solr/replication</str> <str name="masterUrl">http://localhost:9999/solr/replication</str>
<str name="pollInterval">00:00:01</str> <str name="pollInterval">00:00:01</str>
</lst> </lst>
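<!-- pollInterval is in HH:MM:SS form; 00:00:01 has the slave poll the master
every second, which suits this test but would be aggressive in production. -->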
</requestHandler>
<requestHandler name="spellCheckCompRH" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<!-- omp = Only More Popular -->
<str name="spellcheck.onlyMorePopular">false</str>
<!-- exr = Extended Results -->
<str name="spellcheck.extendedResults">false</str>
<!-- The number of suggestions to return -->
<str name="spellcheck.count">1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler>
<highlighting>
<!-- Configure the standard fragmenter -->
<fragmenter name="gap" class="org.apache.solr.highlight.GapFragmenter" default="true">
<lst name="defaults">
<int name="hl.fragsize">100</int>
</lst>
</fragmenter>
<fragmenter name="regex" class="org.apache.solr.highlight.RegexFragmenter">
<lst name="defaults">
<int name="hl.fragsize">70</int>
</lst>
</fragmenter>
<!-- Configure the standard formatter -->
<formatter name="html" class="org.apache.solr.highlight.HtmlFormatter" default="true">
<lst name="defaults">
<str name="hl.simple.pre"><![CDATA[<em>]]></str>
<str name="hl.simple.post"><![CDATA[</em>]]></str>
</lst>
</formatter>
</highlighting>
<!-- enable streaming for testing... -->
<requestDispatcher handleSelect="true">
<requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
<httpCaching lastModifiedFrom="openTime" etagSeed="Solr" never304="false">
<cacheControl>max-age=30, public</cacheControl>
</httpCaching>
</requestDispatcher>
<admin>
<defaultQuery>solr</defaultQuery>
<gettableFiles>solrconfig.xml schema.xml admin-extra.html</gettableFiles>
</admin>
<!-- test getting system property -->
<propTest attr1="${solr.test.sys.prop1}-$${literal}"
attr2="${non.existent.sys.prop:default-from-config}">prefix-${solr.test.sys.prop2}-suffix</propTest>
<queryParser name="foo" class="FooQParserPlugin"/>
</config>