mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-02-17 10:25:15 +00:00
Add multi data.path to migration guide
This commit removes the obsolete settings for distributors and updates the documentation on multiple data.path. It also adds an explanation to the migration guide. Relates to #9498 Closes #10770
This commit is contained in:
parent
32759398a5
commit
94d8b20611
@ -423,3 +423,22 @@ systems and the provided start/stop scripts.
|
||||
|
||||
The Analyze API returns 0 as the first token's position instead of 1.
|
||||
|
||||
=== Multiple data.path striping
|
||||
|
||||
Previously, if the `data.path` setting listed multiple data paths, then a
|
||||
shard would be ``striped'' across all paths by writing a whole file to each
|
||||
path in turn (in accordance with the `index.store.distributor` setting). The
|
||||
result was that the files from a single segment in a shard could be spread
|
||||
across multiple disks, and the failure of any one disk could corrupt multiple
|
||||
shards.
|
||||
|
||||
This striping is no longer supported. Instead, different shards may be
|
||||
allocated to different paths, but all of the files in a single shard will be
|
||||
written to the same path.
|
||||
|
||||
If striping is detected while starting Elasticsearch 2.0.0 or later, all of
|
||||
the files belonging to the same shard will be migrated to the same path. If
|
||||
there is not enough disk space to complete this migration, the upgrade will be
|
||||
cancelled and can only be resumed once enough disk space is made available.
|
||||
|
||||
The `index.store.distributor` setting has also been removed.
|
||||
|
@ -18,30 +18,22 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data
|
||||
| plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins
|
||||
|=======================================================================
|
||||
|
||||
Specifying multiple data locations allows the data to be striped. The striping is simple,
|
||||
placing whole files in one of the locations, and deciding where to place
|
||||
the file based on the value of the `index.store.distributor` setting:
|
||||
Multiple `data` paths may be specified, in order to spread data across
|
||||
multiple disks or locations, but all of the files from a single shard will be
|
||||
written to the same path. This can be configured as follows:
|
||||
|
||||
* `least_used` (default) always selects the directory with the most
|
||||
available space +
|
||||
* `random` selects directories at random. The probability of selecting
|
||||
a particular directory is proportional to amount of available space in
|
||||
this directory.
|
||||
---------------------------------
|
||||
path.data: /mnt/first,/mnt/second
|
||||
---------------------------------
|
||||
|
||||
Note, there are no multiple copies of the same data, in that, its
|
||||
similar to RAID 0. Though simple, it should provide a good solution for
|
||||
people that don't want to mess with RAID. Here is how it is configured:
|
||||
Or in an array format:
|
||||
|
||||
---------------------------------
|
||||
path.data: /mnt/first,/mnt/second
|
||||
---------------------------------
|
||||
|
||||
Or in an array format:
|
||||
|
||||
----------------------------------------
|
||||
path.data: ["/mnt/first", "/mnt/second"]
|
||||
----------------------------------------
|
||||
----------------------------------------
|
||||
path.data: ["/mnt/first", "/mnt/second"]
|
||||
----------------------------------------
|
||||
|
||||
TIP: To stripe shards across multiple disks, please use a RAID driver
|
||||
instead.
|
||||
|
||||
[float]
|
||||
[[default-paths]]
|
||||
|
@ -311,7 +311,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
|
||||
modules.add(new ShardIndexingModule());
|
||||
modules.add(new ShardSearchModule());
|
||||
modules.add(new ShardGetModule());
|
||||
modules.add(new StoreModule(indexSettings, injector.getInstance(IndexStore.class).shardDirectory(), lock,
|
||||
modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
|
||||
new StoreCloseListener(shardId, canDeleteShardContent), path));
|
||||
modules.add(new DeletionPolicyModule(indexSettings));
|
||||
modules.add(new MergePolicyModule(indexSettings));
|
||||
|
@ -29,11 +29,6 @@ import org.elasticsearch.index.shard.ShardPath;
|
||||
*/
|
||||
public class StoreModule extends AbstractModule {
|
||||
|
||||
public static final String DISTIBUTOR_KEY = "index.store.distributor";
|
||||
public static final String LEAST_USED_DISTRIBUTOR = "least_used";
|
||||
public static final String RANDOM_WEIGHT_DISTRIBUTOR = "random";
|
||||
|
||||
private final Settings settings;
|
||||
|
||||
private final ShardLock lock;
|
||||
private final Store.OnClose closeCallback;
|
||||
@ -41,9 +36,8 @@ public class StoreModule extends AbstractModule {
|
||||
private final Class<? extends DirectoryService> shardDirectory;
|
||||
|
||||
|
||||
public StoreModule(Settings settings, Class<? extends DirectoryService> shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) {
|
||||
public StoreModule(Class<? extends DirectoryService> shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) {
|
||||
this.shardDirectory = shardDirectory;
|
||||
this.settings = settings;
|
||||
this.lock = lock;
|
||||
this.closeCallback = closeCallback;
|
||||
this.path = path;
|
||||
|
@ -487,9 +487,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||
builder.put(IndicesStore.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
builder.put(StoreModule.DISTIBUTOR_KEY, random.nextBoolean() ? StoreModule.LEAST_USED_DISTRIBUTOR : StoreModule.RANDOM_WEIGHT_DISTRIBUTOR);
|
||||
}
|
||||
if (random.nextBoolean()) {
|
||||
builder.put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, false);
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user