Name Snapshot Data Blobs by UUID

* There is no functional reason why we need incremental naming for these files, but:
  * as explained in #38941, it is a possible source of repository corruption,
  * it wastes API calls for the list operation, and
  * it is just needless complication.
* Since we store the exact names of the data blobs in all the metadata anyway, we can make this change without any BwC considerations.
* Even in the worst-case scenario of a downgrade, the functionality keeps working: the incremental names cannot conflict with the UUIDs, and the number parsing that finds the next incremental name suppresses the exception when it encounters a non-numeric value after the double-underscore prefix (see the sketch after this list).
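A minimal, self-contained sketch of the downgrade argument above (last bullet). This is not the actual BlobStoreRepository code: it takes plain blob names instead of a Map<String, BlobMetaData>, omits the canonicalName() step, and the UUID-style name in the example is made up; it only illustrates that the old base-36 generation scan skips UUID-named blobs instead of failing.

import java.util.Arrays;
import java.util.Collection;

public class DowngradeParsingSketch {

    // Data blob prefix; "__" per the "double underscore prefix" in the commit message.
    private static final String DATA_BLOB_PREFIX = "__";

    // Simplified stand-in for the removed findLatestFileNameGeneration method.
    static long findLatestFileNameGeneration(Collection<String> blobNames) {
        long generation = -1;
        for (String name : blobNames) {
            if (!name.startsWith(DATA_BLOB_PREFIX)) {
                continue;
            }
            try {
                long currentGen = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX);
                if (currentGen > generation) {
                    generation = currentGen;
                }
            } catch (NumberFormatException e) {
                // A UUID-named blob lands here: its suffix contains non-base-36 characters
                // ('-' or '_') or overflows a long, so it is simply ignored.
            }
        }
        return generation;
    }

    public static void main(String[] args) {
        // "__a" and "__z" are old-style incremental names (generations 10 and 35 in base 36);
        // the last entry is a hypothetical UUID-based name of the new form.
        Collection<String> blobs = Arrays.asList("__a", "__z", "__tpRFYguxTtKEU9tcpl_9hQ");
        // Prints 35: the UUID-named blob neither collides with nor breaks the old scheme,
        // so a downgraded node would pick "__10" (36 in base 36) as the next name.
        System.out.println(findLatestFileNameGeneration(blobs));
    }
}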
parent 31d5a5aa36
commit cd830b53e2
@@ -1053,41 +1053,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         }
     }
 
-    /**
-     * Generates blob name
-     *
-     * @param generation the blob number
-     * @return the blob name
-     */
-    protected String fileNameFromGeneration(long generation) {
-        return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX);
-    }
-
-    /**
-     * Finds the next available blob number
-     *
-     * @param blobs list of blobs in the repository
-     * @return next available blob number
-     */
-    protected long findLatestFileNameGeneration(Map<String, BlobMetaData> blobs) {
-        long generation = -1;
-        for (String name : blobs.keySet()) {
-            if (!name.startsWith(DATA_BLOB_PREFIX)) {
-                continue;
-            }
-            name = canonicalName(name);
-            try {
-                long currentGen = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX);
-                if (currentGen > generation) {
-                    generation = currentGen;
-                }
-            } catch (NumberFormatException e) {
-                logger.warn("file [{}] does not conform to the '{}' schema", name, DATA_BLOB_PREFIX);
-            }
-        }
-        return generation;
-    }
-
     /**
      * Loads all available snapshots in the repository
      *
@@ -1180,7 +1145,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
             }
 
-            long generation = findLatestFileNameGeneration(blobs);
             Tuple<BlobStoreIndexShardSnapshots, Integer> tuple = buildBlobStoreIndexShardSnapshots(blobs);
             BlobStoreIndexShardSnapshots snapshots = tuple.v1();
             int fileListGeneration = tuple.v2();
@@ -1248,7 +1212,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                         indexIncrementalSize += md.length();
                         // create a new FileInfo
                         BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo =
-                            new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize());
+                            new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                         indexCommitPointFiles.add(snapshotFileInfo);
                         filesToSnapshot.add(snapshotFileInfo);
                     } else {
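For reference, a hedged sketch of what the new blob names look like. UUIDs.randomBase64UUID() is Elasticsearch's own helper; the approximation below assumes it is equivalent to URL-safe, unpadded base64 over 16 securely random bytes, which is an assumption of this sketch rather than something shown in the diff.

import java.security.SecureRandom;
import java.util.Base64;

public class UuidBlobNameSketch {

    private static final String DATA_BLOB_PREFIX = "__";
    private static final SecureRandom RANDOM = new SecureRandom();

    // Approximation of DATA_BLOB_PREFIX + UUIDs.randomBase64UUID() from the hunk above.
    static String randomBlobName() {
        byte[] bytes = new byte[16];
        RANDOM.nextBytes(bytes);
        // 16 random bytes -> 22 base64 characters, e.g. "__wHSzRhpUQ92bJnp0nfbj7g".
        return DATA_BLOB_PREFIX + Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }

    public static void main(String[] args) {
        String name = randomBlobName();
        // 24 characters total: 2 for the prefix plus 22 for the encoded UUID. In practice such
        // a suffix does not parse as a base-36 long (it overflows or contains '-'/'_'), which is
        // why the old generation scan just logs and skips it on a downgrade.
        System.out.println(name + " (" + name.length() + " chars)");
    }
}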