diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index bb0c3484b4c..05d59fc6546 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -46,6 +46,9 @@ New Features * SOLR-9194: Enhance the bin/solr script to perform file operations to/from Zookeeper (Erick Erickson, janhoy) +* SOLR-9242: Collection Backup/Restore now supports specifying the directory implementation to use + via the "repository" parameter. (Hrishikesh Gadre, Varun Thacker) + Bug Fixes ---------------------- diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java index a0ac7322175..27a2824336e 100644 --- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java +++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java @@ -17,13 +17,8 @@ package org.apache.solr.cloud; import java.io.IOException; -import java.io.Reader; -import java.io.Writer; import java.lang.invoke.MethodHandles; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; +import java.net.URI; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; @@ -36,6 +31,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.Random; import java.util.Set; @@ -84,6 +80,9 @@ import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.StrUtils; import org.apache.solr.common.util.SuppressForbidden; import org.apache.solr.common.util.Utils; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.backup.BackupManager; +import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.handler.component.ShardHandler; import org.apache.solr.handler.component.ShardHandlerFactory; import 
org.apache.solr.handler.component.ShardRequest; @@ -2215,21 +2214,28 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler private void processBackupAction(ZkNodeProps message, NamedList results) throws IOException, KeeperException, InterruptedException { String collectionName = message.getStr(COLLECTION_PROP); String backupName = message.getStr(NAME); - String location = message.getStr(ZkStateReader.BACKUP_LOCATION); ShardHandler shardHandler = shardHandlerFactory.getShardHandler(); String asyncId = message.getStr(ASYNC); + String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY); + String location = message.getStr(CoreAdminParams.BACKUP_LOCATION); + Map requestMap = new HashMap<>(); Instant startTime = Instant.now(); - // note: we assume a shared files system to backup a collection, since a collection is distributed - Path backupPath = Paths.get(location).resolve(backupName).toAbsolutePath(); + CoreContainer cc = this.overseer.getZkController().getCoreContainer(); + BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo)); + BackupManager backupMgr = new BackupManager(repository, zkStateReader, collectionName); + + // Backup location + URI backupPath = repository.createURI(location, backupName); //Validating if the directory already exists. - if (Files.exists(backupPath)) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "Backup directory already exists: " + backupPath); + if (repository.exists(backupPath)) { + throw new SolrException(ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath); } - Files.createDirectory(backupPath); // create now + + // Create a directory to store backup details. 
+ repository.createDirectory(backupPath); log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName, backupPath); @@ -2242,7 +2248,8 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler ModifiableSolrParams params = new ModifiableSolrParams(); params.set(CoreAdminParams.ACTION, CoreAdminAction.BACKUPCORE.toString()); params.set(NAME, slice.getName()); - params.set("location", backupPath.toString()); // note: index dir will be here then the "snapshot." + slice name + params.set(CoreAdminParams.BACKUP_REPOSITORY, repo); + params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath()); // note: index dir will be here then the "snapshot." + slice name params.set(CORE_NAME_PROP, coreName); sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap); @@ -2256,29 +2263,24 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler //Download the configs String configName = zkStateReader.readConfigName(collectionName); - Path zkBackup = backupPath.resolve("zk_backup"); - zkStateReader.getConfigManager().downloadConfigDir(configName, zkBackup.resolve("configs").resolve(configName)); + backupMgr.downloadConfigDir(location, backupName, configName); //Save the collection's state. 
Can be part of the monolithic clusterstate.json or a individual state.json //Since we don't want to distinguish we extract the state and back it up as a separate json - DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName); - Files.write(zkBackup.resolve("collection_state.json"), - Utils.toJSON(Collections.singletonMap(collectionName, collection))); + DocCollection collectionState = zkStateReader.getClusterState().getCollection(collectionName); + backupMgr.writeCollectionState(location, backupName, collectionName, collectionState); - Path propertiesPath = backupPath.resolve("backup.properties"); Properties properties = new Properties(); - properties.put("backupName", backupName); - properties.put("collection", collectionName); - properties.put("collection.configName", configName); - properties.put("startTime", startTime.toString()); + properties.put(BackupManager.BACKUP_NAME_PROP, backupName); + properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName); + properties.put(COLL_CONF, configName); + properties.put(BackupManager.START_TIME_PROP, startTime.toString()); //TODO: Add MD5 of the configset. If during restore the same name configset exists then we can compare checksums to see if they are the same. //if they are not the same then we can throw an error or have an 'overwriteConfig' flag //TODO save numDocs for the shardLeader. We can use it to sanity check the restore. 
- try (Writer os = Files.newBufferedWriter(propertiesPath, StandardCharsets.UTF_8)) { - properties.store(os, "Snapshot properties file"); - } + backupMgr.writeBackupProperties(location, backupName, properties); log.info("Completed backing up ZK data for backupName={}", backupName); } @@ -2287,26 +2289,21 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler // TODO maybe we can inherit createCollection's options/code String restoreCollectionName = message.getStr(COLLECTION_PROP); String backupName = message.getStr(NAME); // of backup - String location = message.getStr(ZkStateReader.BACKUP_LOCATION); ShardHandler shardHandler = shardHandlerFactory.getShardHandler(); String asyncId = message.getStr(ASYNC); + String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY); + String location = message.getStr(CoreAdminParams.BACKUP_LOCATION); Map requestMap = new HashMap<>(); - Path backupPath = Paths.get(location).resolve(backupName).toAbsolutePath(); - if (!Files.exists(backupPath)) { - throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't restore since doesn't exist: " + backupPath); - } - Path backupZkPath = backupPath.resolve("zk_backup"); + CoreContainer cc = this.overseer.getZkController().getCoreContainer(); + BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo)); - Properties properties = new Properties(); - try (Reader in = Files.newBufferedReader(backupPath.resolve("backup.properties"), StandardCharsets.UTF_8)) { - properties.load(in); - } + URI backupPath = repository.createURI(location, backupName); + BackupManager backupMgr = new BackupManager(repository, zkStateReader, restoreCollectionName); - String backupCollection = (String) properties.get("collection"); - byte[] data = Files.readAllBytes(backupZkPath.resolve("collection_state.json")); - ClusterState backupClusterState = ClusterState.load(-1, data, Collections.emptySet()); - DocCollection backupCollectionState = 
backupClusterState.getCollection(backupCollection); + Properties properties = backupMgr.readBackupProperties(location, backupName); + String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP); + DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection); //Upload the configs String configName = (String) properties.get(COLL_CONF); @@ -2316,11 +2313,11 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler //TODO add overwrite option? } else { log.info("Uploading config {}", restoreConfigName); - zkStateReader.getConfigManager().uploadConfigDir(backupZkPath.resolve("configs").resolve(configName), restoreConfigName); + backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName); } log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName, - backupPath); + location); //Create core-less collection { @@ -2410,7 +2407,9 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler ModifiableSolrParams params = new ModifiableSolrParams(); params.set(CoreAdminParams.ACTION, CoreAdminAction.RESTORECORE.toString()); params.set(NAME, "snapshot." 
+ slice.getName()); - params.set("location", backupPath.toString()); + params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath()); + params.set(CoreAdminParams.BACKUP_REPOSITORY, repo); + sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap); } processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap); diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java index 422a7616119..a6d40664ee0 100644 --- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java +++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java @@ -25,6 +25,7 @@ import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; @@ -44,6 +45,7 @@ import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.util.ExecutorUtil; import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.Utils; +import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.core.backup.repository.BackupRepositoryFactory; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.handler.admin.CollectionsHandler; @@ -149,8 +151,21 @@ public class CoreContainer { private BackupRepositoryFactory backupRepoFactory; - public BackupRepositoryFactory getBackupRepoFactory() { - return backupRepoFactory; + /** + * This method instantiates a new instance of {@linkplain BackupRepository}. + * + * @param repositoryName The name of the backup repository (Optional). + * If not specified, a default implementation is used. + * @return a new instance of {@linkplain BackupRepository}. 
+ */ + public BackupRepository newBackupRepository(Optional repositoryName) { + BackupRepository repository; + if (repositoryName.isPresent()) { + repository = backupRepoFactory.newInstance(getResourceLoader(), repositoryName.get()); + } else { + repository = backupRepoFactory.newInstance(getResourceLoader()); + } + return repository; } public ExecutorService getCoreZkRegisterExecutorService() { diff --git a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java new file mode 100644 index 00000000000..0575bff95ac --- /dev/null +++ b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java @@ -0,0 +1,250 @@ +package org.apache.solr.core.backup; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.Writer; +import java.lang.invoke.MethodHandles; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.List; +import java.util.Properties; + +import com.google.common.base.Preconditions; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.solr.common.SolrException; +import org.apache.solr.common.SolrException.ErrorCode; +import org.apache.solr.common.cloud.ClusterState; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.common.cloud.ZkConfigManager; +import org.apache.solr.common.cloud.ZkStateReader; +import org.apache.solr.common.util.Utils; +import org.apache.solr.core.backup.repository.BackupRepository; +import org.apache.solr.core.backup.repository.BackupRepository.PathType; +import org.apache.solr.util.PropertiesInputStream; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/* + * Licensed to the Apache Software Foundation 
(ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This class implements functionality to create a backup with extension points provided to integrate with different + * types of file-systems. + */ +public class BackupManager { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + public static final String COLLECTION_PROPS_FILE = "collection_state.json"; + public static final String BACKUP_PROPS_FILE = "backup.properties"; + public static final String ZK_STATE_DIR = "zk_backup"; + public static final String CONFIG_STATE_DIR = "configs"; + + // Backup properties + public static final String COLLECTION_NAME_PROP = "collection"; + public static final String BACKUP_NAME_PROP = "backupName"; + public static final String INDEX_VERSION_PROP = "index.version"; + public static final String START_TIME_PROP = "startTime"; + + protected final ZkStateReader zkStateReader; + protected final BackupRepository repository; + + public BackupManager(BackupRepository repository, ZkStateReader zkStateReader, String collectionName) { + this.repository = Preconditions.checkNotNull(repository); + this.zkStateReader = Preconditions.checkNotNull(zkStateReader); + } + + /** + * @return The version of this backup implementation. 
+ */ + public final String getVersion() { + return "1.0"; + } + + /** + * This method returns the configuration parameters for the specified backup. + * + * @param backupLoc The base path used to store the backup data. + * @param backupId The unique name for the backup whose configuration params are required. + * @return the configuration parameters for the specified backup. + * @throws IOException In case of errors. + */ + public Properties readBackupProperties(String backupLoc, String backupId) throws IOException { + Preconditions.checkNotNull(backupLoc); + Preconditions.checkNotNull(backupId); + + // Backup location + URI backupPath = repository.createURI(backupLoc, backupId); + if (!repository.exists(backupPath)) { + throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't restore since doesn't exist: " + backupPath); + } + + Properties props = new Properties(); + try (Reader is = new InputStreamReader(new PropertiesInputStream( + repository.openInput(backupPath, BACKUP_PROPS_FILE, IOContext.DEFAULT)), StandardCharsets.UTF_8)) { + props.load(is); + return props; + } + } + + /** + * This method stores the backup properties at the specified location in the repository. + * + * @param backupLoc The base path used to store the backup data. + * @param backupId The unique name for the backup whose configuration params are required. + * @param props The backup properties + * @throws IOException in case of I/O error + */ + public void writeBackupProperties(String backupLoc, String backupId, Properties props) throws IOException { + URI dest = repository.createURI(backupLoc, backupId, BACKUP_PROPS_FILE); + try (Writer propsWriter = new OutputStreamWriter(repository.createOutput(dest), StandardCharsets.UTF_8)) { + props.store(propsWriter, "Backup properties file"); + } + } + + /** + * This method reads the meta-data information for the backed-up collection. + * + * @param backupLoc The base path used to store the backup data. 
+ * @param backupId The unique name for the backup. + * @return the meta-data information for the backed-up collection. + * @throws IOException in case of errors. + */ + public DocCollection readCollectionState(String backupLoc, String backupId, String collectionName) throws IOException { + Preconditions.checkNotNull(collectionName); + + URI zkStateDir = repository.createURI(backupLoc, backupId, ZK_STATE_DIR); + try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) { + byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small. + is.readBytes(arr, 0, (int) is.length()); + ClusterState c_state = ClusterState.load(-1, arr, Collections.emptySet()); + return c_state.getCollection(collectionName); + } + } + + /** + * This method writes the collection meta-data to the specified location in the repository. + * + * @param backupLoc The base path used to store the backup data. + * @param backupId The unique name for the backup. + * @param collectionName The name of the collection whose meta-data is being stored. + * @param collectionState The collection meta-data to be stored. + * @throws IOException in case of I/O errors. + */ + public void writeCollectionState(String backupLoc, String backupId, String collectionName, + DocCollection collectionState) throws IOException { + URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE); + try (OutputStream collectionStateOs = repository.createOutput(dest)) { + collectionStateOs.write(Utils.toJSON(Collections.singletonMap(collectionName, collectionState))); + } + } + + /** + * This method uploads the Solr configuration files to the desired location in Zookeeper. + * + * @param backupLoc The base path used to store the backup data. + * @param backupId The unique name for the backup. + * @param sourceConfigName The name of the config to be copied + * @param targetConfigName The name of the config to be created. 
+ * @throws IOException in case of I/O errors. + */ + public void uploadConfigDir(String backupLoc, String backupId, String sourceConfigName, String targetConfigName) + throws IOException { + URI source = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName); + String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + targetConfigName; + uploadToZk(zkStateReader.getZkClient(), source, zkPath); + } + + /** + * This method stores the contents of a specified Solr config at the specified location in repository. + * + * @param backupLoc The base path used to store the backup data. + * @param backupId The unique name for the backup. + * @param configName The name of the config to be saved. + * @throws IOException in case of I/O errors. + */ + public void downloadConfigDir(String backupLoc, String backupId, String configName) throws IOException { + URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName); + repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR)); + repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR)); + repository.createDirectory(dest); + + downloadFromZK(zkStateReader.getZkClient(), ZkConfigManager.CONFIGS_ZKNODE + "/" + configName, dest); + } + + private void downloadFromZK(SolrZkClient zkClient, String zkPath, URI dir) throws IOException { + try { + if (!repository.exists(dir)) { + repository.createDirectory(dir); + } + List files = zkClient.getChildren(zkPath, null, true); + for (String file : files) { + List children = zkClient.getChildren(zkPath + "/" + file, null, true); + if (children.size() == 0) { + log.info("Writing file {}", file); + byte[] data = zkClient.getData(zkPath + "/" + file, null, null, true); + try (OutputStream os = repository.createOutput(repository.createURI(dir.getPath(), file))) { + os.write(data); + } + } else { + downloadFromZK(zkClient, zkPath + "/" + file, 
repository.createURI(dir.getPath(), file)); + } + } + } catch (KeeperException | InterruptedException e) { + throw new IOException("Error downloading files from zookeeper path " + zkPath + " to " + dir.toString(), + SolrZkClient.checkInterrupted(e)); + } + } + + private void uploadToZk(SolrZkClient zkClient, URI sourceDir, String destZkPath) throws IOException { + Preconditions.checkArgument(repository.exists(sourceDir), "Path {} does not exist", sourceDir); + Preconditions.checkArgument(repository.getPathType(sourceDir) == PathType.DIRECTORY, + "Path {} is not a directory", sourceDir); + + for (String file : repository.listAll(sourceDir)) { + String zkNodePath = destZkPath + "/" + file; + URI path = repository.createURI(sourceDir.getPath(), file); + PathType t = repository.getPathType(path); + switch (t) { + case FILE: { + try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) { + byte[] arr = new byte[(int) is.length()]; // probably ok since the config file should be small. + is.readBytes(arr, 0, (int) is.length()); + zkClient.makePath(zkNodePath, arr, true); + } catch (KeeperException | InterruptedException e) { + throw new IOException(e); + } + break; + } + + case DIRECTORY: { + if (!file.startsWith(".")) { + uploadToZk(zkClient, path, zkNodePath); + } + break; + } + default: + throw new IllegalStateException("Unknown path type " + t); + } + } + } +} diff --git a/solr/core/src/java/org/apache/solr/core/backup/package-info.java b/solr/core/src/java/org/apache/solr/core/backup/package-info.java new file mode 100644 index 00000000000..defcad6b55c --- /dev/null +++ b/solr/core/src/java/org/apache/solr/core/backup/package-info.java @@ -0,0 +1,22 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. 
+* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + + +/** + * Core classes for Solr's Backup/Restore functionality + */ +package org.apache.solr.core.backup; \ No newline at end of file diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java index f209b874a5f..20d8628a50c 100644 --- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java +++ b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java @@ -21,20 +21,18 @@ import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; import java.net.URI; +import java.util.Optional; + import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.util.plugin.NamedListInitializedPlugin; /** * This interface defines the functionality required to backup/restore Solr indexes to an arbitrary storage system. */ public interface BackupRepository extends NamedListInitializedPlugin, Closeable { - /** - * A parameter to specify the name of the backup repository to be used. - */ - String REPOSITORY_PROPERTY_NAME = "repository"; - /** * This enumeration defines the type of a given path. 
@@ -43,6 +41,17 @@ public interface BackupRepository extends NamedListInitializedPlugin, Closeable DIRECTORY, FILE } + /** + * This method returns the location where the backup should be stored (or restored from). + * + * @param override The location parameter supplied by the user. + * @return If override is not null then return the same value + * Otherwise return the default configuration value for the {@linkplain CoreAdminParams#BACKUP_LOCATION} parameter. + */ + default String getBackupLocation(String override) { + return Optional.ofNullable(override).orElse(getConfigProperty(CoreAdminParams.BACKUP_LOCATION)); + } + /** * This method returns the value of the specified configuration property. */ diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java index 1893a7da52d..6e1b3a087a1 100644 --- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -37,6 +37,7 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.Random; import java.util.concurrent.ExecutorService; @@ -67,8 +68,8 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; -import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CommonParams; +import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.ExecutorUtil; @@ -84,7 +85,6 @@ import org.apache.solr.core.IndexDeletionPolicyWrapper; import org.apache.solr.core.SolrCore; import org.apache.solr.core.SolrDeletionPolicy; import 
org.apache.solr.core.SolrEventListener; -import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.core.backup.repository.LocalFileSystemRepository; import org.apache.solr.request.SolrQueryRequest; @@ -331,7 +331,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw throw new SolrException(ErrorCode.BAD_REQUEST, "Missing mandatory param: name"); } - SnapShooter snapShooter = new SnapShooter(core, params.get(LOCATION), params.get(NAME)); + SnapShooter snapShooter = new SnapShooter(core, params.get(CoreAdminParams.BACKUP_LOCATION), params.get(NAME)); snapShooter.validateDeleteSnapshot(); snapShooter.deleteSnapAsync(this); } @@ -412,19 +412,16 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw "for the same core"); } String name = params.get(NAME); - String location = params.get(LOCATION); + String location = params.get(CoreAdminParams.BACKUP_LOCATION); - String repoName = params.get(BackupRepository.REPOSITORY_PROPERTY_NAME); + String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); - SolrResourceLoader rl = cc.getResourceLoader(); BackupRepository repo = null; - if(repoName != null) { - repo = cc.getBackupRepoFactory().newInstance(rl, repoName); + if (repoName != null) { + repo = cc.newBackupRepository(Optional.of(repoName)); + location = repo.getBackupLocation(location); if (location == null) { - location = repo.getConfigProperty(ZkStateReader.BACKUP_LOCATION); - if(location == null) { - throw new IllegalArgumentException("location is required"); - } + throw new IllegalArgumentException("location is required"); } } else { repo = new LocalFileSystemRepository(); @@ -520,18 +517,15 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw indexCommit = req.getSearcher().getIndexReader().getIndexCommit(); } - String location = 
params.get(ZkStateReader.BACKUP_LOCATION); - String repoName = params.get(BackupRepository.REPOSITORY_PROPERTY_NAME); + String location = params.get(CoreAdminParams.BACKUP_LOCATION); + String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); - SolrResourceLoader rl = cc.getResourceLoader(); BackupRepository repo = null; - if(repoName != null) { - repo = cc.getBackupRepoFactory().newInstance(rl, repoName); + if (repoName != null) { + repo = cc.newBackupRepository(Optional.of(repoName)); + location = repo.getBackupLocation(location); if (location == null) { - location = repo.getConfigProperty(ZkStateReader.BACKUP_LOCATION); - if(location == null) { - throw new IllegalArgumentException("location is required"); - } + throw new IllegalArgumentException("location is required"); } } else { repo = new LocalFileSystemRepository(); @@ -1645,8 +1639,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } } - private static final String LOCATION = "location"; - private static final String SUCCESS = "success"; private static final String FAILED = "failed"; diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java index 85c98c1bba1..97fbd2d181d 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java @@ -18,6 +18,7 @@ package org.apache.solr.handler.admin; import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -26,6 +27,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -75,6 +77,7 @@ import 
org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.Utils; import org.apache.solr.core.CloudConfig; import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.handler.component.ShardHandler; import org.apache.solr.request.SolrQueryRequest; @@ -807,15 +810,32 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken."); } - String location = req.getParams().get(ZkStateReader.BACKUP_LOCATION); + CoreContainer cc = h.coreContainer; + String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY); + BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo)); + + String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION)); if (location == null) { - location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty("location", (String) null); + // Check if the location is specified in the cluster property. + location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null); + if (location == null) { + throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + + " parameter or as a default repository property or as a cluster property."); + } } - if (location == null) { - throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query parameter or set as a cluster property"); + + // Check if the specified location is valid for this repository. 
+ URI uri = repository.createURI(location); + try { + if (!repository.exists(uri)) { + throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist."); + } + } catch (IOException ex) { + throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex); } + Map params = req.getParams().getAll(null, NAME, COLLECTION_PROP); - params.put("location", location); + params.put(CoreAdminParams.BACKUP_LOCATION, location); return params; } }, @@ -831,16 +851,32 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken."); } - String location = req.getParams().get(ZkStateReader.BACKUP_LOCATION); + CoreContainer cc = h.coreContainer; + String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY); + BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo)); + + String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION)); if (location == null) { - location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty("location", (String) null); + // Check if the location is specified in the cluster property. + location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null); + if (location == null) { + throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + + " parameter or as a default repository property or as a cluster property."); + } } - if (location == null) { - throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query parameter or set as a cluster property"); + + // Check if the specified location is valid for this repository. 
+ URI uri = repository.createURI(location); + try { + if (!repository.exists(uri)) { + throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist."); + } + } catch (IOException ex) { + throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex); } Map params = req.getParams().getAll(null, NAME, COLLECTION_PROP); - params.put("location", location); + params.put(CoreAdminParams.BACKUP_LOCATION, location); // from CREATE_OP: req.getParams().getAll(params, COLL_CONF, REPLICATION_FACTOR, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS); copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java index 3c52beace86..bf892277d78 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.Set; import java.util.concurrent.Future; @@ -40,6 +41,7 @@ import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.cloud.SyncStrategy; import org.apache.solr.cloud.ZkController; import org.apache.solr.common.SolrException; +import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.DocRouter; @@ -858,21 +860,13 @@ enum CoreAdminOperation { throw new IllegalArgumentException(CoreAdminParams.NAME + " is required"); } - SolrResourceLoader loader = callInfo.handler.coreContainer.getResourceLoader(); - BackupRepository repository; - String repoName = params.get(BackupRepository.REPOSITORY_PROPERTY_NAME); - if(repoName != null) { - 
repository = callInfo.handler.coreContainer.getBackupRepoFactory().newInstance(loader, repoName); - } else { // Fetch the default. - repository = callInfo.handler.coreContainer.getBackupRepoFactory().newInstance(loader); - } + String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); + BackupRepository repository = callInfo.handler.coreContainer.newBackupRepository(Optional.ofNullable(repoName)); - String location = params.get(ZkStateReader.BACKUP_LOCATION); - if (location == null) { - location = repository.getConfigProperty(ZkStateReader.BACKUP_LOCATION); - if (location == null) { - throw new IllegalArgumentException("location is required"); - } + String location = repository.getBackupLocation(params.get(CoreAdminParams.BACKUP_LOCATION)); + if(location == null) { + throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + + " parameter or as a default repository property"); } try (SolrCore core = callInfo.handler.coreContainer.getCore(cname)) { @@ -912,21 +906,13 @@ enum CoreAdminOperation { throw new IllegalArgumentException(CoreAdminParams.NAME + " is required"); } - SolrResourceLoader loader = callInfo.handler.coreContainer.getResourceLoader(); - BackupRepository repository; - String repoName = params.get(BackupRepository.REPOSITORY_PROPERTY_NAME); - if(repoName != null) { - repository = callInfo.handler.coreContainer.getBackupRepoFactory().newInstance(loader, repoName); - } else { // Fetch the default. 
- repository = callInfo.handler.coreContainer.getBackupRepoFactory().newInstance(loader); - } + String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); + BackupRepository repository = callInfo.handler.coreContainer.newBackupRepository(Optional.ofNullable(repoName)); - String location = params.get(ZkStateReader.BACKUP_LOCATION); - if (location == null) { - location = repository.getConfigProperty(ZkStateReader.BACKUP_LOCATION); - if (location == null) { - throw new IllegalArgumentException("location is required"); - } + String location = repository.getBackupLocation(params.get(CoreAdminParams.BACKUP_LOCATION)); + if(location == null) { + throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + + " parameter or as a default repository property"); } try (SolrCore core = callInfo.handler.coreContainer.getCore(cname)) { diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java similarity index 70% rename from solr/core/src/test/org/apache/solr/cloud/TestCloudBackupRestore.java rename to solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java index 5e35616db68..96faf923922 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestCloudBackupRestore.java +++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java @@ -1,3 +1,35 @@ +package org.apache.solr.cloud; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; +import java.util.TreeMap; + +import org.apache.lucene.util.TestUtil; +import org.apache.solr.client.solrj.SolrQuery; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.impl.CloudSolrClient; +import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException; +import 
org.apache.solr.client.solrj.request.CollectionAdminRequest; +import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp; +import org.apache.solr.client.solrj.response.RequestStatusState; +import org.apache.solr.common.SolrException.ErrorCode; +import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.ImplicitDocRouter; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.common.params.CoreAdminParams; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.solr.common.params.ShardParams._ROUTE_; + /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,58 +47,43 @@ * limitations under the License. */ -package org.apache.solr.cloud; - -import java.io.IOException; -import java.lang.invoke.MethodHandles; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Random; -import java.util.TreeMap; - -import org.apache.lucene.util.TestUtil; -import org.apache.solr.client.solrj.SolrQuery; -import org.apache.solr.client.solrj.SolrServerException; -import org.apache.solr.client.solrj.impl.CloudSolrClient; -import org.apache.solr.client.solrj.request.CollectionAdminRequest; -import org.apache.solr.client.solrj.response.RequestStatusState; -import org.apache.solr.common.SolrInputDocument; -import org.apache.solr.common.cloud.DocCollection; -import org.apache.solr.common.cloud.ImplicitDocRouter; -import org.apache.solr.common.cloud.Slice; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.solr.common.params.ShardParams._ROUTE_; - -public class TestCloudBackupRestore extends SolrCloudTestCase { - +/** + * This class implements the 
logic required to test Solr cloud backup/restore capability. + */ +public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - private static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more + protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more private static long docsSeed; // see indexDocs() @BeforeClass public static void createCluster() throws Exception { - configureCluster(2)// nodes - .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) - .configure(); - docsSeed = random().nextLong(); } + /** + * @return The name of the collection to use. + */ + public abstract String getCollectionName(); + + /** + * @return The name of the backup repository to use. + */ + public abstract String getBackupRepoName(); + + /** + * @return The absolute path for the backup location. + * Could return null. 
+ */ + public abstract String getBackupLocation(); + @Test public void test() throws Exception { - String collectionName = "backuprestore"; boolean isImplicit = random().nextBoolean(); int replFactor = TestUtil.nextInt(random(), 1, 2); CollectionAdminRequest.Create create = - CollectionAdminRequest.createCollection(collectionName, "conf1", NUM_SHARDS, replFactor); + CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor); if (NUM_SHARDS * replFactor > cluster.getJettySolrRunners().size() || random().nextBoolean()) { create.setMaxShardsPerNode(NUM_SHARDS);//just to assert it survives the restoration } @@ -90,24 +107,62 @@ public class TestCloudBackupRestore extends SolrCloudTestCase { CloudSolrClient solrClient = cluster.getSolrClient(); create.process(solrClient); - indexDocs(collectionName); + indexDocs(getCollectionName()); if (!isImplicit && random().nextBoolean()) { // shard split the first shard - int prevActiveSliceCount = getActiveSliceCount(collectionName); - CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName); + int prevActiveSliceCount = getActiveSliceCount(getCollectionName()); + CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName()); splitShard.setShardName("shard1"); splitShard.process(solrClient); // wait until we see one more active slice... - for (int i = 0; getActiveSliceCount(collectionName) != prevActiveSliceCount + 1; i++) { + for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) { assertTrue(i < 30); Thread.sleep(500); } // issue a hard commit. 
Split shard does a soft commit which isn't good enough for the backup/snapshooter to see - solrClient.commit(collectionName); + solrClient.commit(getCollectionName()); } - testBackupAndRestore(collectionName); + testBackupAndRestore(getCollectionName()); + testInvalidPath(getCollectionName()); + } + + // This test verifies the system behavior when the backup location cluster property is configured with an invalid + // value for the specified repository (and the default backup location is not configured in solr.xml). + private void testInvalidPath(String collectionName) throws Exception { + // Execute this test only if the default backup location is NOT configured in solr.xml + if (getBackupLocation() == null) { + return; + } + + String backupName = "invalidbackuprequest"; + CloudSolrClient solrClient = cluster.getSolrClient(); + + ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist"); + assertEquals(0, req.process(solrClient).getStatus()); + + // Do not specify the backup location. 
+ CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName) + .setRepositoryName(getBackupRepoName()); + try { + backup.process(solrClient); + fail("This request should have failed since the cluster property value for backup location property is invalid."); + } catch (SolrServerException ex) { + assertTrue(ex.getCause() instanceof RemoteSolrException); + assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code()); + } + + String restoreCollectionName = collectionName + "_invalidrequest"; + CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName) + .setRepositoryName(getBackupRepoName()); + try { + restore.process(solrClient); + fail("This request should have failed since the cluster property value for backup location property is invalid."); + } catch (SolrServerException ex) { + assertTrue(ex.getCause() instanceof RemoteSolrException); + assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code()); + } } private int getActiveSliceCount(String collectionName) { @@ -134,6 +189,7 @@ public class TestCloudBackupRestore extends SolrCloudTestCase { } private void testBackupAndRestore(String collectionName) throws Exception { + String backupLocation = getBackupLocation(); String backupName = "mytestbackup"; CloudSolrClient client = cluster.getSolrClient(); @@ -142,13 +198,11 @@ public class TestCloudBackupRestore extends SolrCloudTestCase { Map origShardToDocCount = getShardToDocCountMap(client, backupCollection); assert origShardToDocCount.isEmpty() == false; - String location = createTempDir().toFile().getAbsolutePath(); - log.info("Triggering Backup command"); { CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName) - .setLocation(location); + .setLocation(backupLocation).setRepositoryName(getBackupRepoName()); if (random().nextBoolean()) { assertEquals(0, 
backup.process(client).getStatus()); } else { @@ -163,7 +217,8 @@ public class TestCloudBackupRestore extends SolrCloudTestCase { { CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName) - .setLocation(location); + .setLocation(backupLocation).setRepositoryName(getBackupRepoName()); + if (origShardToDocCount.size() > cluster.getJettySolrRunners().size()) { // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more) restore.setMaxShardsPerNode(origShardToDocCount.size()); @@ -215,5 +270,4 @@ public class TestCloudBackupRestore extends SolrCloudTestCase { } return shardToDocCount; } - } diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java new file mode 100644 index 00000000000..a09fc2f5692 --- /dev/null +++ b/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java @@ -0,0 +1,148 @@ +package org.apache.solr.cloud; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.net.URI; +import java.net.URISyntaxException; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.solr.cloud.hdfs.HdfsTestUtil; +import org.apache.solr.util.BadHdfsThreadsFilter; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This class implements the tests for HDFS integration for Solr backup/restore capability. + */ +@ThreadLeakFilters(defaultFilters = true, filters = { + BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) +}) +public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase { + public static final String SOLR_XML = "\n" + + "\n" + + " ${shareSchema:false}\n" + + " ${configSetBaseDir:configsets}\n" + + " ${coreRootDirectory:.}\n" + + "\n" + + " \n" + + " ${urlScheme:}\n" + + " ${socketTimeout:90000}\n" + + " ${connTimeout:15000}\n" + + " \n" + + "\n" + + " \n" + + " 127.0.0.1\n" + + " ${hostPort:8983}\n" + + " ${hostContext:solr}\n" + + " ${solr.zkclienttimeout:30000}\n" + + " ${genericCoreNodeNames:true}\n" + + " 10000\n" + + " ${distribUpdateConnTimeout:45000}\n" + + " ${distribUpdateSoTimeout:340000}\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " ${solr.hdfs.default.backup.path}\n" + + " ${solr.hdfs.home:}\n" + + " ${solr.hdfs.confdir:}\n" + + " \n" + + " \n" + + " \n" + + "\n"; + + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + private static MiniDFSCluster dfsCluster; + private static String hdfsUri; + private static FileSystem fs; + + @BeforeClass + public static void setupClass() throws Exception { + dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); + hdfsUri 
= HdfsTestUtil.getURI(dfsCluster); + try { + URI uri = new URI(hdfsUri); + Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); + conf.setBoolean("fs.hdfs.impl.disable.cache", true); + fs = FileSystem.get(uri, conf); + + if (fs instanceof DistributedFileSystem) { + // Make sure dfs is not in safe mode + while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) { + log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again."); + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + Thread.interrupted(); + // continue + } + } + } + + fs.mkdirs(new org.apache.hadoop.fs.Path("/backup")); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + + System.setProperty("solr.hdfs.default.backup.path", "/backup"); + System.setProperty("solr.hdfs.home", hdfsUri + "/solr"); + useFactory("solr.StandardDirectoryFactory"); + + configureCluster(NUM_SHARDS)// nodes + .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) + .withSolrXml(SOLR_XML) + .configure(); + } + + @AfterClass + public static void teardownClass() throws Exception { + System.clearProperty("solr.hdfs.home"); + System.clearProperty("solr.hdfs.default.backup.path"); + System.clearProperty("test.build.data"); + System.clearProperty("test.cache.data"); + IOUtils.closeQuietly(fs); + fs = null; + HdfsTestUtil.teardownClass(dfsCluster); + dfsCluster = null; + } + + @Override + public String getCollectionName() { + return "hdfsbackuprestore"; + } + + @Override + public String getBackupRepoName() { + return "hdfs"; + } + + @Override + public String getBackupLocation() { + return null; + } +} diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java new file mode 100644 index 00000000000..6f3e2bc8db0 --- /dev/null +++ 
b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java @@ -0,0 +1,50 @@ +package org.apache.solr.cloud; + +import org.junit.BeforeClass; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This class implements the tests for local file-system integration for Solr backup/restore capability. + * Note that the Solr backup/restore still requires a "shared" file-system. It's just that in this case + * such file-system would be exposed via local file-system API. 
+ */ +public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase { + + @BeforeClass + public static void setupClass() throws Exception { + configureCluster(NUM_SHARDS)// nodes + .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) + .configure(); + } + + @Override + public String getCollectionName() { + return "backuprestore"; + } + + @Override + public String getBackupRepoName() { + return null; + } + + @Override + public String getBackupLocation() { + return createTempDir().toFile().getAbsolutePath(); + } +} diff --git a/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java b/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java index 81d3c40cf67..a03d4c4d7bf 100644 --- a/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java +++ b/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java @@ -21,9 +21,9 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; -import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.core.backup.repository.BackupRepositoryFactory; @@ -37,8 +37,6 @@ import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; - public class TestBackupRepositoryFactory extends SolrTestCaseJ4 { @Rule public TestRule solrTestRules = RuleChain.outerRule(new SystemPropertiesRestoreRule()); @@ -129,7 +127,7 @@ public class TestBackupRepositoryFactory extends SolrTestCaseJ4 { attrs.put(CoreAdminParams.NAME, "repo1"); attrs.put(FieldType.CLASS_NAME, 
LocalFileSystemRepository.class.getName()); attrs.put("default" , "true"); - attrs.put(ZkStateReader.BACKUP_LOCATION, "/tmp"); + attrs.put("location", "/tmp"); plugins[0] = new PluginInfo("repository", attrs); } @@ -139,14 +137,14 @@ public class TestBackupRepositoryFactory extends SolrTestCaseJ4 { BackupRepository repo = f.newInstance(loader); assertTrue(repo instanceof LocalFileSystemRepository); - assertEquals("/tmp", repo.getConfigProperty(ZkStateReader.BACKUP_LOCATION)); + assertEquals("/tmp", repo.getConfigProperty("location")); } { BackupRepository repo = f.newInstance(loader, "repo1"); assertTrue(repo instanceof LocalFileSystemRepository); - assertEquals("/tmp", repo.getConfigProperty(ZkStateReader.BACKUP_LOCATION)); + assertEquals("/tmp", repo.getConfigProperty("location")); } } } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java index 452c7a15584..7bc9e4fd931 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java @@ -18,6 +18,7 @@ package org.apache.solr.client.solrj.request; import java.io.IOException; import java.util.Collection; +import java.util.Optional; import java.util.Properties; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -595,6 +596,7 @@ public abstract class CollectionAdminRequest // BACKUP request public static class Backup extends AsyncCollectionSpecificAdminRequest { protected final String name; + protected Optional repositoryName; protected String location; public Backup(String collection, String name) { @@ -625,12 +627,24 @@ public abstract class CollectionAdminRequest return this; } + public Optional getRepositoryName() { + return repositoryName; + } + + public Backup setRepositoryName(String repositoryName) { + this.repositoryName = 
Optional.ofNullable(repositoryName); + return this; + } + @Override public SolrParams getParams() { ModifiableSolrParams params = (ModifiableSolrParams) super.getParams(); params.set(CoreAdminParams.COLLECTION, collection); params.set(CoreAdminParams.NAME, name); - params.set("location", location); //note: optional + params.set(CoreAdminParams.BACKUP_LOCATION, location); //note: optional + if (repositoryName.isPresent()) { + params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get()); + } return params; } @@ -643,6 +657,7 @@ public abstract class CollectionAdminRequest // RESTORE request public static class Restore extends AsyncCollectionSpecificAdminRequest { protected final String backupName; + protected Optional repositoryName; protected String location; // in common with collection creation: @@ -678,6 +693,15 @@ public abstract class CollectionAdminRequest return this; } + public Optional getRepositoryName() { + return repositoryName; + } + + public Restore setRepositoryName(String repositoryName) { + this.repositoryName = Optional.ofNullable(repositoryName); + return this; + } + // Collection creation params in common: public Restore setConfigName(String config) { this.configName = config; return this; } public String getConfigName() { return configName; } @@ -703,7 +727,7 @@ public abstract class CollectionAdminRequest ModifiableSolrParams params = (ModifiableSolrParams) super.getParams(); params.set(CoreAdminParams.COLLECTION, collection); params.set(CoreAdminParams.NAME, backupName); - params.set("location", location); //note: optional + params.set(CoreAdminParams.BACKUP_LOCATION, location); //note: optional params.set("collection.configName", configName); //note: optional if (maxShardsPerNode != null) { params.set( "maxShardsPerNode", maxShardsPerNode); @@ -717,6 +741,10 @@ public abstract class CollectionAdminRequest if (properties != null) { addProperties(params, properties); } + if (repositoryName.isPresent()) { + 
params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get()); + } + return params; } diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java index 709eebe745a..f106b9c8612 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java @@ -44,6 +44,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.solr.common.Callable; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; +import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.util.ExecutorUtil; import org.apache.solr.common.util.Pair; import org.apache.solr.common.util.Utils; @@ -106,7 +107,6 @@ public class ZkStateReader implements Closeable { public static final String URL_SCHEME = "urlScheme"; - public static final String BACKUP_LOCATION = "location"; /** A view of the current state of all collections; combines all the different state sources into a single view. */ protected volatile ClusterState clusterState; @@ -160,7 +160,7 @@ public class ZkStateReader implements Closeable { LEGACY_CLOUD, URL_SCHEME, AUTO_ADD_REPLICAS, - BACKUP_LOCATION, + CoreAdminParams.BACKUP_LOCATION, MAX_CORES_PER_NODE))); /** diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java index 716dfee0c88..7455cbf1071 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java @@ -108,6 +108,16 @@ public abstract class CoreAdminParams // Node to create a replica on for ADDREPLICA at least. public static final String NODE = "node"; + /** + * A parameter to specify the name of the backup repository to be used. 
+ */ + public static final String BACKUP_REPOSITORY = "repository"; + + /** + * A parameter to specify the location where the backup should be stored. + */ + public static final String BACKUP_LOCATION = "location"; + public enum CoreAdminAction { STATUS(true), UNLOAD,