Allow shrinking of indices from a previous major (#28076)

Lucene does not allow adding Lucene 6 files to a Lucene 7 index. This commit ensures that we carry over the Lucene version to the newly created Lucene index.

Closes #28061
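In outline, the fix seeds the empty shrink target with a commit that records the source's index-created version, and then opens the IndexWriter in APPEND mode so that commit is preserved. A minimal sketch of the idea (the class and method names here are illustrative, not part of the commit):

// Sketch only: seed the target with the source's index-created version, then
// open the writer in APPEND mode so the seeded commit is kept.
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;

import java.io.IOException;

class CarryOverCreatedVersionSketch {
    static IndexWriter openShrinkTarget(Directory source, Directory target) throws IOException {
        // e.g. 6 for an index created on an Elasticsearch 5.x cluster (Lucene 6)
        int createdMajor = SegmentInfos.readLatestCommit(source).getIndexCreatedVersionMajor();
        // write an empty commit on the target stamped with that created version
        new SegmentInfos(createdMajor).commit(target);
        IndexWriterConfig iwc = new IndexWriterConfig(null)
            .setCommitOnClose(false)
            // APPEND keeps the seeded commit; CREATE would start a brand-new Lucene 7
            // index, and adding the hard-linked Lucene 6 segments would then be rejected
            .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
        return new IndexWriter(target, iwc);
    }
}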
Yannick Welsch 2018-01-04 16:23:25 +01:00 committed by GitHub
parent 19c61e894e
commit ca325194d9
2 changed files with 76 additions and 1 deletion


@@ -145,14 +145,22 @@ final class StoreRecovery {
    void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory target, final Sort indexSort, final Directory[] sources,
                    final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split,
                    boolean hasNested) throws IOException {
        // clean target directory (if previous recovery attempt failed) and create a fresh segment file with the proper lucene version
        Lucene.cleanLuceneIndex(target);
        assert sources.length > 0;
        final int luceneIndexCreatedVersionMajor = Lucene.readSegmentInfos(sources[0]).getIndexCreatedVersionMajor();
        new SegmentInfos(luceneIndexCreatedVersionMajor).commit(target);
        final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
        IndexWriterConfig iwc = new IndexWriterConfig(null)
            .setCommitOnClose(false)
            // we don't want merges to happen here - we call maybe merge on the engine
            // later once we started it up, otherwise we would need to wait for it here
            // we also don't specify a codec here and merges should use the engines for this index
            .setMergePolicy(NoMergePolicy.INSTANCE)
-           .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+           .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
        if (indexSort != null) {
            iwc.setIndexSort(indexSort);
        }

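For illustration only (this check is not part of the commit), a hypothetical assertion that the shrink target really kept the source's created-version major after addIndices has run:

import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;

import java.io.IOException;

class CreatedVersionCheckSketch {
    // hypothetical sanity check: source and shrink target must agree on the Lucene
    // major version they were created with
    static void assertCreatedVersionCarriedOver(Directory source, Directory target) throws IOException {
        int sourceMajor = SegmentInfos.readLatestCommit(source).getIndexCreatedVersionMajor();
        int targetMajor = SegmentInfos.readLatestCommit(target).getIndexCreatedVersionMajor();
        if (sourceMajor != targetMajor) {
            throw new IllegalStateException("shrink target created with Lucene major " + targetMajor
                + ", but the source index was created with " + sourceMajor);
        }
    }
}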

@@ -423,6 +423,73 @@ public class FullClusterRestartIT extends ESRestTestCase {
        assertEquals(numDocs, totalHits);
    }

    public void testShrinkAfterUpgrade() throws IOException {
        String shrunkenIndex = index + "_shrunk";
        int numDocs;
        if (runningAgainstOldCluster) {
            // on the old cluster: create a multi-shard index with a simple mapping and index random documents
            XContentBuilder mappingsAndSettings = jsonBuilder();
            mappingsAndSettings.startObject();
            {
                mappingsAndSettings.startObject("mappings");
                mappingsAndSettings.startObject("doc");
                mappingsAndSettings.startObject("properties");
                {
                    mappingsAndSettings.startObject("field");
                    mappingsAndSettings.field("type", "text");
                    mappingsAndSettings.endObject();
                }
                mappingsAndSettings.endObject();
                mappingsAndSettings.endObject();
                mappingsAndSettings.endObject();
            }
            mappingsAndSettings.endObject();
            client().performRequest("PUT", "/" + index, Collections.emptyMap(),
                new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON));
            numDocs = randomIntBetween(512, 1024);
            indexRandomDocuments(numDocs, true, true, i -> {
                return JsonXContent.contentBuilder().startObject()
                    .field("field", "value")
                    .endObject();
            });
        } else {
            // on the upgraded cluster: block writes on the index (a prerequisite for shrink),
            // then shrink it down to a single shard
            String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}";
            Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(),
                new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON));
            assertEquals(200, rsp.getStatusLine().getStatusCode());
            String shrinkIndexRequestBody = "{\"settings\": {\"index.number_of_shards\": 1}}";
            rsp = client().performRequest("PUT", "/" + index + "/_shrink/" + shrunkenIndex, Collections.emptyMap(),
                new StringEntity(shrinkIndexRequestBody, ContentType.APPLICATION_JSON));
            assertEquals(200, rsp.getStatusLine().getStatusCode());
            numDocs = countOfIndexedRandomDocuments();
        }

        // the source index must remain searchable with all documents, in both phases of the test
        Response rsp = client().performRequest("POST", "/_refresh");
        assertEquals(200, rsp.getStatusLine().getStatusCode());
        Map<?, ?> response = toMap(client().performRequest("GET", "/" + index + "/_search"));
        assertNoFailures(response);
        int totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
        assertThat(totalShards, greaterThan(1));
        int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response);
        assertEquals(totalShards, successfulShards);
        int totalHits = (int) XContentMapValues.extractValue("hits.total", response);
        assertEquals(numDocs, totalHits);

        if (runningAgainstOldCluster == false) {
            // the shrunken single-shard index must contain the same number of documents
            response = toMap(client().performRequest("GET", "/" + shrunkenIndex + "/_search"));
            assertNoFailures(response);
            totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
            assertEquals(1, totalShards);
            successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response);
            assertEquals(1, successfulShards);
            totalHits = (int) XContentMapValues.extractValue("hits.total", response);
            assertEquals(numDocs, totalHits);
        }
    }

    void assertBasicSearchWorks(int count) throws IOException {
        logger.info("--> testing basic search");
        Map<String, Object> response = toMap(client().performRequest("GET", "/" + index + "/_search"));