SOLR-11556: fixing multiple backup repository support.

This commit is contained in:
Mikhail Khludnev 2019-07-18 10:50:26 -04:00
parent ebeae0c889
commit c173c2e863
6 changed files with 143 additions and 21 deletions

View File

@ -100,6 +100,9 @@ Bug Fixes
* SOLR-13206: Fix AIOOBE when group.facet is specified with group.query and return proper error code. (Marek, Munendra S N)
* SOLR-11556: Backup and restore command really supports picking one of a few repositories by
repository parameter (Timothy Potter via Mikhail Khludnev)
Other Changes
----------------------
@ -215,7 +218,7 @@ Improvements
* SOLR-13602 SOLR-13588: Add a field type for Estonian language to default managed_schema,
document about Estonian language analysis in Solr Ref Guide (Tomoko Uchida)
* SOLR-13003: Query Result Cache does not honour maxRamBytes parameter. (ab, Brian Ecker)
* SOLR-13003: Query Result Cache does not honor maxRamBytes parameter. (ab, Brian Ecker)
Bug Fixes
----------------------

View File

@ -1076,11 +1076,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
BACKUP_OP(BACKUP, (req, rsp, h) -> {
req.getParams().required().check(NAME, COLLECTION_PROP);
String extCollectionName = req.getParams().get(COLLECTION_PROP);
boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
final String extCollectionName = req.getParams().get(COLLECTION_PROP);
final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
.getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
if (!clusterState.hasCollection(collectionName)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
}
@ -1101,7 +1101,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
// Check if the specified location is valid for this repository.
URI uri = repository.createURI(location);
final URI uri = repository.createURI(location);
try {
if (!repository.exists(uri)) {
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
@ -1115,16 +1115,20 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
}
Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP, FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME);
final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP, FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME);
params.put(CoreAdminParams.BACKUP_LOCATION, location);
if (repo != null) {
params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
}
params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
return params;
}),
RESTORE_OP(RESTORE, (req, rsp, h) -> {
req.getParams().required().check(NAME, COLLECTION_PROP);
String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
final String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
//We always want to restore into a collection name which doesn't exist yet.
if (clusterState.hasCollection(collectionName)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken.");
@ -1133,9 +1137,9 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' is an existing alias, no action taken.");
}
CoreContainer cc = h.coreContainer;
String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
final CoreContainer cc = h.coreContainer;
final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
final BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
if (location == null) {
@ -1149,7 +1153,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
// Check if the specified location is valid for this repository.
URI uri = repository.createURI(location);
final URI uri = repository.createURI(location);
try {
if (!repository.exists(uri)) {
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
@ -1158,7 +1162,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
}
String createNodeArg = req.getParams().get(CREATE_NODE_SET);
final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
throw new SolrException(
SolrException.ErrorCode.BAD_REQUEST,
@ -1170,8 +1174,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
"Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
}
Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
params.put(CoreAdminParams.BACKUP_LOCATION, location);
if (repo != null) {
params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
}
// from CREATE_OP:
copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
PULL_REPLICAS, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE);

View File

@ -67,7 +67,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
int numPullReplicas;
private static long docsSeed; // see indexDocs()
private String testSuffix = "test1";
protected String testSuffix = "test1";
@BeforeClass
public static void createCluster() throws Exception {

View File

@ -93,6 +93,9 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa
" <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
" <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
" </repository>\n" +
" <repository name=\"poisioned\" default=\"true\" "
+ "class=\"org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository\"> \n" +
" </repository>\n" +
" </backup>\n" +
" \n" +
"</solr>\n";

View File

@ -16,9 +16,22 @@
*/
package org.apache.solr.cloud.api.collections;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
import org.junit.BeforeClass;
import org.junit.Test;
@ -27,15 +40,28 @@ import org.junit.Test;
* Solr backup/restore still requires a "shared" file-system. It's just that in this case such a file-system would be
* exposed via local file-system API.
*/
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12866")
public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
private static String backupLocation;
@BeforeClass
public static void setupClass() throws Exception {
String solrXml = MiniSolrCloudCluster.DEFAULT_CLOUD_SOLR_XML;
String poisioned =
" <repository name=\""+TestLocalFSCloudBackupRestore.poisioned+"\" default=\"true\" "
+ "class=\"org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository\"> \n" +
" </repository>\n";
String local =
" <repository name=\"local\" "
+ "class=\"org.apache.solr.core.backup.repository.LocalFileSystemRepository\"> \n" +
" </repository>\n";
solrXml = solrXml.replace("</solr>",
"<backup>" + (random().nextBoolean() ? poisioned+local: local+poisioned)
+ "</backup>"+ "</solr>");
configureCluster(NUM_SHARDS)// nodes
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.addConfig("confFaulty", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.withSolrXml(solrXml)
.configure();
cluster.getZkClient().delete(ZkConfigManager.CONFIGS_ZKNODE + Path.SEPARATOR + "confFaulty" + Path.SEPARATOR + "solrconfig.xml", -1, true);
@ -54,7 +80,7 @@ public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTes
@Override
public String getBackupRepoName() {
  // Explicitly pick the working "local" repository: the default repository
  // configured in solr.xml is the poisioned one, which always fails.
  // (The previous unconditional `return null;` relied on the default repo.)
  return "local";
}
@Override
@ -63,8 +89,72 @@ public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTes
}
@Override
@Test
public void test() throws Exception {
  // Run the standard backup/restore round-trip against the working "local"
  // repository first...
  super.test();
  // ...then verify that hitting the poisioned (default) repository makes
  // both backup and restore fail with a server error.
  CloudSolrClient solrClient = cluster.getSolrClient();
  errorBackup(solrClient);
  erroRestore(solrClient);
}
/**
 * Attempts a restore into a not-yet-existing collection while the poisioned
 * repository is in effect (named explicitly half the time, otherwise picked up
 * as the default) and verifies the request fails with a server error whose
 * message mentions the poisioned repository.
 */
private void erroRestore(CloudSolrClient solrClient) throws SolrServerException, IOException {
  final String backupName = BACKUPNAME_PREFIX + testSuffix;
  final CollectionAdminRequest.Restore restoreReq =
      CollectionAdminRequest.restoreCollection(getCollectionName()+"boo", backupName)
          .setLocation(backupLocation);
  // Half of the runs name the poisioned repo explicitly; the rest rely on it being the default.
  if (random().nextBoolean()) {
    restoreReq.setRepositoryName(poisioned);
  }
  try {
    restoreReq.process(solrClient);
    fail("This request should have failed since omitting repo, picks up default poisioned.");
  } catch (SolrException ex) {
    assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
    assertTrue(ex.getMessage(), ex.getMessage().contains(poisioned));
  }
}
/**
 * Issues a backup against the poisioned repository (named explicitly half the
 * time, otherwise hit as the configured default) and verifies the request
 * fails with a server error.
 */
private void errorBackup(CloudSolrClient solrClient)
    throws SolrServerException, IOException {
  final CollectionAdminRequest.Backup backupReq =
      CollectionAdminRequest.backupCollection(getCollectionName(), "poisionedbackup")
          .setLocation(getBackupLocation());
  // otherwise we hit default
  if (random().nextBoolean()) {
    backupReq.setRepositoryName(poisioned);
  }
  try {
    backupReq.process(solrClient);
    fail("This request should have failed since omitting repo, picks up default poisioned.");
  } catch (SolrException ex) {
    assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
  }
}
private static final String poisioned = "poisioned";
// Let the request go through the collection handler; fail only when the real work (Restore/BackupCore) begins.
/**
 * A {@link LocalFileSystemRepository} whose data-transfer operations always
 * throw {@link UnsupportedOperationException}. Collection-level request
 * handling succeeds; any attempt to actually read or write index files fails,
 * carrying the {@code poisioned} marker in the exception message.
 */
public static class PoinsionedRepository extends LocalFileSystemRepository {

  public PoinsionedRepository() {
    super();
  }

  @Override
  public IndexInput openInput(URI dirPath, String fileName, IOContext ctx) throws IOException {
    throw new UnsupportedOperationException(poisioned);
  }

  @Override
  public OutputStream createOutput(URI path) throws IOException {
    throw new UnsupportedOperationException(poisioned);
  }

  @Override
  public void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException {
    throw new UnsupportedOperationException(poisioned);
  }

  @Override
  public void copyFileTo(URI sourceDir, String fileName, Directory dest) throws IOException {
    throw new UnsupportedOperationException(poisioned);
  }
}
}

View File

@ -17,6 +17,8 @@
package org.apache.solr.core;
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@ -26,6 +28,7 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
import org.apache.solr.core.backup.repository.HdfsBackupRepository;
import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
import org.apache.solr.schema.FieldType;
import org.junit.AfterClass;
@ -119,7 +122,7 @@ public class TestBackupRepositoryFactory extends SolrTestCaseJ4 {
@Test
public void testRepositoryConfig() {
PluginInfo[] plugins = new PluginInfo[1];
PluginInfo[] plugins = new PluginInfo[2];
{
Map<String, Object> attrs = new HashMap<>();
@ -129,6 +132,14 @@ public class TestBackupRepositoryFactory extends SolrTestCaseJ4 {
attrs.put("location", "/tmp");
plugins[0] = new PluginInfo("repository", attrs);
}
{
Map<String, Object> attrs = new HashMap<>();
attrs.put(CoreAdminParams.NAME, "boom");
attrs.put(FieldType.CLASS_NAME, HdfsBackupRepository.class.getName());
attrs.put("location", "/tmp");
plugins[1] = new PluginInfo("repository", attrs);
}
Collections.shuffle(Arrays.asList(plugins), random());
BackupRepositoryFactory f = new BackupRepositoryFactory(plugins);
@ -145,5 +156,13 @@ public class TestBackupRepositoryFactory extends SolrTestCaseJ4 {
assertTrue(repo instanceof LocalFileSystemRepository);
assertEquals("/tmp", repo.getConfigProperty("location"));
}
{
try {
BackupRepository repo = f.newInstance(loader, "boom");
fail();
} catch (Exception e) {
}
}
}
}