mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-03-24 17:09:48 +00:00
Snapshot/Restore: fix FSRepository location configuration
Closes #11068
This commit is contained in:
parent
14c1743f30
commit
dd41c68741
@ -488,6 +488,18 @@ As a consequence the `query` filter serves no purpose anymore and is deprecated.
|
||||
|
||||
=== Snapshot and Restore
|
||||
|
||||
Locations of file system repositories now have to be registered using the `path.repo` setting. The `path.repo`
|
||||
setting can contain one or more repository locations:
|
||||
|
||||
[source,yaml]
|
||||
---------------
|
||||
path.repo: ["/mnt/daily", "/mnt/weekly"]
|
||||
---------------
|
||||
|
||||
If the file system repository location is specified as an absolute path it has to start with one of the locations
|
||||
specified in `path.repo`. If the location is specified as a relative path, it will be resolved against the first
|
||||
location specified in the `path.repo` setting.
|
||||
|
||||
The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer
|
||||
supported by the snapshot and restore operations. These parameters have been replaced by
|
||||
a single `expand_wildcards` parameter. See <<multi-index,the multi-index docs>> for more.
|
||||
|
@ -9,16 +9,14 @@ backends are available via officially supported repository plugins.
|
||||
=== Repositories
|
||||
|
||||
Before any snapshot or restore operation can be performed a snapshot repository should be registered in
|
||||
Elasticsearch. The following command registers a shared file system repository with the name `my_backup` that
|
||||
will use location `/mount/backups/my_backup` to store snapshots.
|
||||
Elasticsearch. The repository settings are repository-type specific. See below for details.
|
||||
|
||||
[source,js]
|
||||
-----------------------------------
|
||||
$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup' -d '{
|
||||
"type": "fs",
|
||||
"type": "REPOSITORY TYPE",
|
||||
"settings": {
|
||||
"location": "/mount/backups/my_backup",
|
||||
"compress": true
|
||||
... repository specific settings ...
|
||||
}
|
||||
}'
|
||||
-----------------------------------
|
||||
@ -60,9 +58,48 @@ $ curl -XGET 'http://localhost:9200/_snapshot/_all'
|
||||
[float]
|
||||
===== Shared File System Repository
|
||||
|
||||
The shared file system repository (`"type": "fs"`) uses the shared file system to store snapshots. The path
|
||||
specified in the `location` parameter should point to the same location in the shared filesystem and be accessible
|
||||
on all data and master nodes. The following settings are supported:
|
||||
The shared file system repository (`"type": "fs"`) uses the shared file system to store snapshots. In order to register
|
||||
the shared file system repository it is necessary to mount the same shared filesystem to the same location on all
|
||||
master and data nodes. This location (or one of its parent directories) has to be registered in the `path.repo`
|
||||
setting on all master and data nodes.
|
||||
|
||||
Assuming that the shared filesystem is mounted to `/mount/backups/my_backup`, the following setting should be added to
|
||||
`elasticsearch.yml` file:
|
||||
|
||||
[source,yaml]
|
||||
--------------
|
||||
path.repo: ["/mount/backups", "/mount/longterm_backups"]
|
||||
--------------
|
||||
|
||||
After all nodes are restarted, the following command can be used to register the shared file system repository with
|
||||
the name `my_backup`:
|
||||
|
||||
[source,js]
|
||||
-----------------------------------
|
||||
$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup' -d '{
|
||||
"type": "fs",
|
||||
"settings": {
|
||||
"location": "/mount/backups/my_backup",
|
||||
"compress": true
|
||||
}
|
||||
}'
|
||||
-----------------------------------
|
||||
|
||||
If the repository location is specified as a relative path this path will be resolved against the first path specified
|
||||
in `path.repo`:
|
||||
|
||||
[source,js]
|
||||
-----------------------------------
|
||||
$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup' -d '{
|
||||
"type": "fs",
|
||||
"settings": {
|
||||
"location": "my_backup",
|
||||
"compress": true
|
||||
}
|
||||
}'
|
||||
-----------------------------------
|
||||
|
||||
The following settings are supported:
|
||||
|
||||
[horizontal]
|
||||
`location`:: Location of the snapshots. Mandatory.
|
||||
|
@ -16,6 +16,9 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data
|
||||
| logs | Log files location. | {path.home}/logs | path.logs
|
||||
|
||||
| plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins
|
||||
|
||||
| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | empty | path.repo
|
||||
|
||||
|=======================================================================
|
||||
|
||||
Multiple `data` paths may be specified, in order to spread data across
|
||||
|
@ -70,6 +70,9 @@ class Security {
|
||||
for (Path path : environment.dataWithClusterFiles()) {
|
||||
addPath(policy, path, "read,readlink,write,delete");
|
||||
}
|
||||
for (Path path : environment.repoFiles()) {
|
||||
addPath(policy, path, "read,readlink,write,delete");
|
||||
}
|
||||
if (environment.pidFile() != null) {
|
||||
addPath(policy, environment.pidFile().getParent(), "read,readlink,write,delete");
|
||||
}
|
||||
|
@ -75,7 +75,22 @@ public final class PathUtils {
|
||||
return Paths.get(uri);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Tries to resolve the given path against the list of available roots.
|
||||
*
|
||||
* If path starts with one of the listed roots, it returned back by this method, otherwise null is returned.
|
||||
*/
|
||||
public static Path get(Path[] roots, String path) {
|
||||
for (Path root : roots) {
|
||||
Path normalizedPath = root.resolve(path).normalize();
|
||||
if(normalizedPath.startsWith(root)) {
|
||||
return normalizedPath;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the default FileSystem.
|
||||
*/
|
||||
|
@ -50,6 +50,8 @@ public class Environment {
|
||||
|
||||
private final Path[] dataWithClusterFiles;
|
||||
|
||||
private final Path[] repoFiles;
|
||||
|
||||
private final Path configFile;
|
||||
|
||||
private final Path pluginsFile;
|
||||
@ -110,7 +112,15 @@ public class Environment {
|
||||
dataFiles = new Path[]{homeFile.resolve("data")};
|
||||
dataWithClusterFiles = new Path[]{homeFile.resolve("data").resolve(ClusterName.clusterNameFromSettings(settings).value())};
|
||||
}
|
||||
|
||||
String[] repoPaths = settings.getAsArray("path.repo");
|
||||
if (repoPaths.length > 0) {
|
||||
repoFiles = new Path[repoPaths.length];
|
||||
for (int i = 0; i < repoPaths.length; i++) {
|
||||
repoFiles[i] = PathUtils.get(repoPaths[i]);
|
||||
}
|
||||
} else {
|
||||
repoFiles = new Path[0];
|
||||
}
|
||||
if (settings.get("path.logs") != null) {
|
||||
logsFile = PathUtils.get(cleanPath(settings.get("path.logs")));
|
||||
} else {
|
||||
@ -152,6 +162,22 @@ public class Environment {
|
||||
return dataWithClusterFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* The shared filesystem repo locations.
|
||||
*/
|
||||
public Path[] repoFiles() {
|
||||
return repoFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the specified location against the list of configured repository roots
|
||||
*
|
||||
* If the specified location doesn't match any of the roots, returns null.
|
||||
*/
|
||||
public Path resolveRepoFile(String location) {
|
||||
return PathUtils.get(repoFiles, location);
|
||||
}
|
||||
|
||||
/**
|
||||
* The config location.
|
||||
*/
|
||||
|
@ -19,14 +19,12 @@
|
||||
|
||||
package org.elasticsearch.repositories.fs;
|
||||
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRepository;
|
||||
import org.elasticsearch.repositories.RepositoryException;
|
||||
import org.elasticsearch.repositories.RepositoryName;
|
||||
@ -68,17 +66,26 @@ public class FsRepository extends BlobStoreRepository {
|
||||
* @param indexShardRepository index shard repository
|
||||
* @throws IOException
|
||||
*/
|
||||
@Inject @SuppressForbidden(reason = "needs fixing: https://github.com/elastic/elasticsearch/issues/11068")
|
||||
public FsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
|
||||
@Inject
|
||||
public FsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, Environment environment) throws IOException {
|
||||
super(name.getName(), repositorySettings, indexShardRepository);
|
||||
Path locationFile;
|
||||
String location = repositorySettings.settings().get("location", settings.get("repositories.fs.location"));
|
||||
if (location == null) {
|
||||
logger.warn("using local fs location for gateway, should be changed to be a shared location across nodes");
|
||||
logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes");
|
||||
throw new RepositoryException(name.name(), "missing location");
|
||||
} else {
|
||||
locationFile = PathUtils.get(location);
|
||||
}
|
||||
locationFile = environment.resolveRepoFile(location);
|
||||
if (locationFile == null) {
|
||||
if (environment.repoFiles().length > 0) {
|
||||
logger.warn("The specified location [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles());
|
||||
throw new RepositoryException(name.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo");
|
||||
} else {
|
||||
logger.warn("The specified location [{}] should start with a repository path specified by the path.repo setting, but the path.repo setting was not set on this node", location);
|
||||
throw new RepositoryException(name.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo because this setting is empty");
|
||||
}
|
||||
}
|
||||
|
||||
blobStore = new FsBlobStore(settings, locationFile);
|
||||
this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", settings.getAsBytesSize("repositories.fs.chunk_size", null));
|
||||
this.compress = repositorySettings.settings().getAsBoolean("compress", settings.getAsBoolean("repositories.fs.compress", false));
|
||||
|
@ -48,7 +48,7 @@ public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
|
||||
assertBlocked(client().admin().cluster().preparePutRepository("test-repo-blocks")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())), MetaData.CLUSTER_READ_ONLY_BLOCK);
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())), MetaData.CLUSTER_READ_ONLY_BLOCK);
|
||||
} finally {
|
||||
setClusterReadOnly(false);
|
||||
}
|
||||
@ -57,7 +57,7 @@ public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -65,7 +65,7 @@ public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
// This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
|
||||
try {
|
||||
@ -82,7 +82,7 @@ public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
logger.info("--> deleting a repository is blocked when the cluster is read only");
|
||||
try {
|
||||
@ -101,7 +101,7 @@ public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
// This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
|
||||
try {
|
||||
|
@ -61,7 +61,7 @@ public class SnapshotBlocksTests extends ElasticsearchIntegrationTest {
|
||||
logger.info("--> register a repository");
|
||||
assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME)
|
||||
.setType("fs")
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
logger.info("--> verify the repository");
|
||||
VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get();
|
||||
|
@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.snapshots.AbstractSnapshotTests;
|
||||
import org.elasticsearch.snapshots.RestoreInfo;
|
||||
@ -47,6 +48,7 @@ import java.util.SortedSet;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import static com.google.common.collect.Lists.newArrayList;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
@ -54,6 +56,14 @@ import static org.hamcrest.Matchers.*;
|
||||
@ClusterScope(scope = Scope.TEST)
|
||||
public class RestoreBackwardsCompatTests extends AbstractSnapshotTests {
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return settingsBuilder()
|
||||
.put(super.nodeSettings(nodeOrdinal))
|
||||
.put("path.repo", reposRoot())
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void restoreOldSnapshots() throws Exception {
|
||||
String repo = "test_repo";
|
||||
@ -103,6 +113,10 @@ public class RestoreBackwardsCompatTests extends AbstractSnapshotTests {
|
||||
}
|
||||
}
|
||||
|
||||
private Path reposRoot() {
|
||||
return getDataPath(".");
|
||||
}
|
||||
|
||||
private List<String> repoVersions() throws Exception {
|
||||
return listRepoVersions("repo");
|
||||
}
|
||||
@ -113,7 +127,7 @@ public class RestoreBackwardsCompatTests extends AbstractSnapshotTests {
|
||||
|
||||
private List<String> listRepoVersions(String prefix) throws Exception {
|
||||
List<String> repoVersions = newArrayList();
|
||||
Path repoFiles = getDataPath(".");
|
||||
Path repoFiles = reposRoot();
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(repoFiles, prefix + "-*.zip")) {
|
||||
for (Path entry : stream) {
|
||||
String fileName = entry.getFileName().toString();
|
||||
@ -131,7 +145,7 @@ public class RestoreBackwardsCompatTests extends AbstractSnapshotTests {
|
||||
URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/");
|
||||
logger.info("--> creating repository [{}] for version [{}]", repo, version);
|
||||
assertAcked(client().admin().cluster().preparePutRepository(repo)
|
||||
.setType("url").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.setType("url").setSettings(settingsBuilder()
|
||||
.put("url", repoJarUri.toString())));
|
||||
}
|
||||
|
||||
|
@ -30,6 +30,10 @@ import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.net.URL;
|
||||
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||
import static org.hamcrest.CoreMatchers.notNullValue;
|
||||
import static org.hamcrest.CoreMatchers.nullValue;
|
||||
|
||||
/**
|
||||
* Simple unit-tests for Environment.java
|
||||
*/
|
||||
@ -68,4 +72,19 @@ public class EnvironmentTests extends ElasticsearchTestCase {
|
||||
assertEquals(string, "tool help");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRepositoryResolution() throws IOException {
|
||||
Environment environment = newEnvironment();
|
||||
assertThat(environment.resolveRepoFile("/test/repos/repo1"), nullValue());
|
||||
assertThat(environment.resolveRepoFile("test/repos/repo1"), nullValue());
|
||||
environment = newEnvironment(settingsBuilder().putArray("path.repo", "/test/repos", "/another/repos").build());
|
||||
assertThat(environment.resolveRepoFile("/test/repos/repo1"), notNullValue());
|
||||
assertThat(environment.resolveRepoFile("test/repos/repo1"), notNullValue());
|
||||
assertThat(environment.resolveRepoFile("/another/repos/repo1"), notNullValue());
|
||||
assertThat(environment.resolveRepoFile("/test/repos/../repo1"), nullValue());
|
||||
assertThat(environment.resolveRepoFile("/test/repos/../repos/repo1"), notNullValue());
|
||||
assertThat(environment.resolveRepoFile("/somethingeles/repos/repo1"), nullValue());
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())));
|
||||
.put("location", randomRepoPath())));
|
||||
CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get();
|
||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||
|
@ -489,7 +489,7 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(settingsBuilder()
|
||||
.put("location", createTempDir().toAbsolutePath())
|
||||
.put("location", randomRepoPath().toAbsolutePath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
logger.info("--> snapshot");
|
||||
|
@ -317,7 +317,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest
|
||||
waitForRelocation();
|
||||
|
||||
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).get();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
|
||||
|
||||
@ -469,7 +469,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest
|
||||
waitForRelocation();
|
||||
|
||||
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).get();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
|
||||
|
||||
|
@ -426,7 +426,7 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
|
||||
logger.info("--> create repository");
|
||||
assertAcked(client().admin().cluster().preparePutRepository(REPO_NAME)
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", false)
|
||||
).get());
|
||||
|
||||
|
@ -113,7 +113,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
|
||||
logger.info("--> create repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).execute().actionGet();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
logger.info("--> start snapshot");
|
||||
@ -146,7 +146,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
|
||||
@Test
|
||||
public void restoreCustomMetadata() throws Exception {
|
||||
Path tempDir = createTempDir();
|
||||
Path tempDir = randomRepoPath();
|
||||
|
||||
logger.info("--> start node");
|
||||
internalCluster().startNode();
|
||||
@ -293,7 +293,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("random", randomAsciiOfLength(10))
|
||||
.put("wait_after_unblock", 200)
|
||||
).get();
|
||||
@ -338,7 +338,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||
|
||||
logger.info("--> creating repository");
|
||||
Path repo = createTempDir();
|
||||
Path repo = randomRepoPath();
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
ImmutableSettings.settingsBuilder()
|
||||
@ -433,7 +433,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
logger.info("--> create repository");
|
||||
logger.info("--> creating repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).execute().actionGet();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
logger.info("--> start snapshot with default settings - should fail");
|
||||
@ -544,7 +544,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
|
||||
logger.info("--> create repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).execute().actionGet();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
int numberOfShards = 6;
|
||||
logger.info("--> create an index that will have some unallocated shards");
|
||||
@ -603,12 +603,12 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
for (int i = 0; i < 5; i++) {
|
||||
client().admin().cluster().preparePutRepository("test-repo" + i)
|
||||
.setType("mock").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())).setVerify(false).get();
|
||||
.put("location", randomRepoPath())).setVerify(false).get();
|
||||
}
|
||||
logger.info("--> make sure that properly setup repository can be registered on all nodes");
|
||||
client().admin().cluster().preparePutRepository("test-repo-0")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())).get();
|
||||
.put("location", randomRepoPath())).get();
|
||||
|
||||
}
|
||||
|
||||
@ -626,7 +626,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
|
@ -56,7 +56,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
public void testRepositoryCreation() throws Exception {
|
||||
Client client = client();
|
||||
|
||||
Path location = createTempDir();
|
||||
Path location = randomRepoPath();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
@ -84,7 +84,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating another repository");
|
||||
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
@ -135,7 +135,18 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
client.admin().cluster().preparePutRepository("test-repo").setType("fs").get();
|
||||
fail("Shouldn't be here");
|
||||
} catch (RepositoryException ex) {
|
||||
// Expected
|
||||
assertThat(ex.toString(), containsString("missing location"));
|
||||
}
|
||||
|
||||
logger.info("--> trying creating repository with location that is not registered in path.repo setting");
|
||||
String location = createTempDir().toAbsolutePath().toString();
|
||||
try {
|
||||
client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location))
|
||||
.get();
|
||||
fail("Shouldn't be here");
|
||||
} catch (RepositoryException ex) {
|
||||
assertThat(ex.toString(), containsString("location [" + location + "] doesn't match any of the locations specified by path.repo"));
|
||||
}
|
||||
}
|
||||
|
||||
@ -144,7 +155,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
|
||||
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-1")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(5, 100))
|
||||
)
|
||||
@ -154,7 +165,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
|
||||
putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-2")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(5, 100))
|
||||
).get();
|
||||
@ -175,7 +186,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
Client client = client();
|
||||
|
||||
Settings settings = ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("random_control_io_exception_rate", 1.0).build();
|
||||
logger.info("--> creating repository that cannot write any files - should fail");
|
||||
assertThrows(client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
@ -189,7 +200,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
logger.info("--> verifying repository");
|
||||
assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class);
|
||||
|
||||
Path location = createTempDir();
|
||||
Path location = randomRepoPath();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
try {
|
||||
@ -210,7 +221,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
Client client = client();
|
||||
|
||||
Settings settings = ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("random_control_io_exception_rate", 1.0).build();
|
||||
logger.info("--> creating repository that cannot write any files - should fail");
|
||||
assertThrows(client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
@ -224,7 +235,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||
logger.info("--> verifying repository");
|
||||
assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class);
|
||||
|
||||
Path location = createTempDir();
|
||||
Path location = randomRepoPath();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
try {
|
||||
|
@ -90,7 +90,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -185,7 +185,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
String indexName = "testindex";
|
||||
String repoName = "test-restore-snapshot-repo";
|
||||
String snapshotName = "test-restore-snapshot";
|
||||
String absolutePath = createTempDir().toAbsolutePath().toString();
|
||||
String absolutePath = randomRepoPath().toAbsolutePath().toString();
|
||||
logger.info("Path [{}]", absolutePath);
|
||||
String restoredIndexName = indexName + "-restored";
|
||||
String typeName = "actions";
|
||||
@ -231,7 +231,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -280,7 +280,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -328,7 +328,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
|
||||
logger.info("--> creating repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())).get();
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
logger.info("--> snapshot");
|
||||
@ -345,7 +345,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
logger.info("--> create test indices");
|
||||
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
|
||||
@ -401,7 +401,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", randomRepoPath())));
|
||||
|
||||
logger.info("--> creating test template");
|
||||
assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
|
||||
@ -433,7 +433,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
Client client = client();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
Path location = createTempDir();
|
||||
Path location = randomRepoPath();
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location)));
|
||||
|
||||
@ -515,7 +515,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("random", randomAsciiOfLength(10))
|
||||
.put("random_control_io_exception_rate", 0.2))
|
||||
.setVerify(false));
|
||||
@ -565,7 +565,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("random", randomAsciiOfLength(10))
|
||||
.put("random_data_file_io_exception_rate", 0.3)));
|
||||
|
||||
@ -627,7 +627,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
|
||||
@Test
|
||||
public void dataFileFailureDuringRestoreTest() throws Exception {
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
Client client = client();
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
@ -669,7 +669,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
|
||||
@Test
|
||||
public void deletionOfFailingToRecoverIndexShouldStopRestore() throws Exception {
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
Client client = client();
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
@ -738,7 +738,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())));
|
||||
.put("location", randomRepoPath())));
|
||||
|
||||
logger.info("--> creating index that cannot be allocated");
|
||||
prepareCreate("test-idx", 2, ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get();
|
||||
@ -756,7 +756,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
final int numberOfSnapshots = between(5, 15);
|
||||
Client client = client();
|
||||
|
||||
Path repo = createTempDir();
|
||||
Path repo = randomRepoPath();
|
||||
logger.info("--> creating repository at " + repo.toAbsolutePath());
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
@ -813,7 +813,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
public void deleteSnapshotWithMissingIndexAndShardMetadataTest() throws Exception {
|
||||
Client client = client();
|
||||
|
||||
Path repo = createTempDir();
|
||||
Path repo = randomRepoPath();
|
||||
logger.info("--> creating repository at " + repo.toAbsolutePath());
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
@ -852,7 +852,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
public void deleteSnapshotWithMissingMetadataTest() throws Exception {
|
||||
Client client = client();
|
||||
|
||||
Path repo = createTempDir();
|
||||
Path repo = randomRepoPath();
|
||||
logger.info("--> creating repository at " + repo.toAbsolutePath());
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
@ -887,7 +887,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
public void deleteSnapshotWithCorruptedSnapshotFileTest() throws Exception {
|
||||
Client client = client();
|
||||
|
||||
Path repo = createTempDir();
|
||||
Path repo = randomRepoPath();
|
||||
logger.info("--> creating repository at " + repo.toAbsolutePath());
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
@ -932,7 +932,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())));
|
||||
.put("location", randomRepoPath())));
|
||||
|
||||
createIndex("test-idx", "test-idx-closed");
|
||||
ensureGreen();
|
||||
@ -970,7 +970,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())));
|
||||
.put("location", randomRepoPath())));
|
||||
|
||||
createIndex("test-idx");
|
||||
ensureGreen();
|
||||
@ -991,7 +991,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())));
|
||||
.put("location", randomRepoPath())));
|
||||
|
||||
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
|
||||
ensureGreen();
|
||||
@ -1107,7 +1107,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
@Test
|
||||
public void moveShardWhileSnapshottingTest() throws Exception {
|
||||
Client client = client();
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
@ -1169,7 +1169,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
@Test
|
||||
public void deleteRepositoryWhileSnapshottingTest() throws Exception {
|
||||
Client client = client();
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
logger.info("--> creating repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
@ -1254,7 +1254,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
Client client = client();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", repositoryLocation)
|
||||
@ -1312,7 +1312,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
Client client = client();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
boolean throttleSnapshot = randomBoolean();
|
||||
boolean throttleRestore = randomBoolean();
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
@ -1370,7 +1370,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
@Test
|
||||
public void snapshotStatusTest() throws Exception {
|
||||
Client client = client();
|
||||
Path repositoryLocation = createTempDir();
|
||||
Path repositoryLocation = randomRepoPath();
|
||||
logger.info("--> creating repository");
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||
@ -1473,7 +1473,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -1521,7 +1521,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -1584,7 +1584,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
@ -1701,7 +1701,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))
|
||||
.put("block_on_init", true)
|
||||
@ -1753,7 +1753,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))
|
||||
));
|
||||
@ -1833,7 +1833,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir())
|
||||
.put("location", randomRepoPath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
|
||||
|
@ -59,7 +59,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
.put("location", createTempDir().toAbsolutePath())
|
||||
.put("location", randomRepoPath().toAbsolutePath())
|
||||
.put("compress", randomBoolean())
|
||||
.put("chunk_size", randomIntBetween(100, 1000))));
|
||||
String[] indicesBefore = new String[randomIntBetween(2,5)];
|
||||
@ -165,7 +165,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo
|
||||
|
||||
public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException, IOException {
|
||||
Client client = client();
|
||||
final Path tempDir = createTempDir().toAbsolutePath();
|
||||
final Path tempDir = randomRepoPath().toAbsolutePath();
|
||||
logger.info("--> creating repository");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||
|
@ -33,6 +33,7 @@ import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRepository;
|
||||
import org.elasticsearch.repositories.RepositoryName;
|
||||
import org.elasticsearch.repositories.RepositorySettings;
|
||||
@ -43,7 +44,6 @@ import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
@ -81,8 +81,8 @@ public class MockRepository extends FsRepository {
|
||||
private volatile boolean blocked = false;
|
||||
|
||||
@Inject
|
||||
public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, ClusterService clusterService) throws IOException {
|
||||
super(name, overrideSettings(repositorySettings, clusterService), indexShardRepository);
|
||||
public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, ClusterService clusterService, Environment environment) throws IOException {
|
||||
super(name, overrideSettings(repositorySettings, clusterService), indexShardRepository, environment);
|
||||
randomControlIOExceptionRate = repositorySettings.settings().getAsDouble("random_control_io_exception_rate", 0.0);
|
||||
randomDataFileIOExceptionRate = repositorySettings.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
|
||||
blockOnControlFiles = repositorySettings.settings().getAsBoolean("block_on_control", false);
|
||||
|
@ -93,6 +93,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.codec.CodecService;
|
||||
import org.elasticsearch.index.fielddata.FieldDataType;
|
||||
@ -1846,6 +1847,27 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||
return timeZone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns path to a random directory that can be used to create a temporary file system repo
|
||||
*/
|
||||
public Path randomRepoPath() {
|
||||
return randomRepoPath(internalCluster().getDefaultSettings());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns path to a random directory that can be used to create a temporary file system repo
|
||||
*/
|
||||
public static Path randomRepoPath(Settings settings) {
|
||||
Environment environment = new Environment(settings);
|
||||
Path[] repoFiles = environment.repoFiles();
|
||||
assert repoFiles.length > 0;
|
||||
Path path;
|
||||
do {
|
||||
path = repoFiles[0].resolve(randomAsciiOfLength(10));
|
||||
} while (Files.exists(path));
|
||||
return path;
|
||||
}
|
||||
|
||||
protected NumShards getNumShards(String index) {
|
||||
MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
|
||||
assertThat(metaData.hasIndex(index), equalTo(true));
|
||||
|
@ -278,6 +278,7 @@ public final class InternalTestCluster extends TestCluster {
|
||||
}
|
||||
}
|
||||
builder.put("path.home", baseDir);
|
||||
builder.put("path.repo", baseDir.resolve("repos"));
|
||||
builder.put("transport.tcp.port", BASE_PORT + "-" + (BASE_PORT+100));
|
||||
builder.put("http.port", BASE_PORT+101 + "-" + (BASE_PORT+200));
|
||||
builder.put("config.ignore_system_properties", true);
|
||||
|
Loading…
x
Reference in New Issue
Block a user