FEATURE: Dynamic chunk size with uppy (#22061)

When we get to really big files, it's better not to have thousands
of small chunks, since we don't have resume functionality if the
upload fails. It's better to try to upload fewer chunks, even if those
chunks are bigger.

For example, with this change a 20GB file would go from roughly 4000 chunks
at the default 5MB size to roughly 1000 chunks at the new 20MB size. Still a
lot, but perhaps more manageable.
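
As a rough sketch of the arithmetic behind that example (the helper below is
only illustrative and not part of this change):

const MB = 1024 * 1024;
const GB = 1024 * MB;

// Number of multipart chunks needed for a file at a given chunk size.
function chunkCount(fileSize, chunkSize) {
  return Math.ceil(fileSize / chunkSize);
}

chunkCount(20 * GB, 5 * MB); // 4096 chunks at the old fixed 5MB chunk size
chunkCount(20 * GB, 20 * MB); // 1024 chunks at the new 20MB chunk size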

This is somewhat experimental; if we still don't see improvements, we can
always revert.
Martin Brennan 2023-06-12 17:47:29 +10:00 committed by GitHub
parent fca6c1836c
commit 7e0fcd1b42
1 changed file with 15 additions and 0 deletions

@@ -5,6 +5,7 @@ import { Promise } from "rsvp";
import { ajax } from "discourse/lib/ajax";
import AwsS3Multipart from "@uppy/aws-s3-multipart";
const RETRY_DELAYS = [0, 1000, 3000, 5000];
const MB = 1024 * 1024;
export default Mixin.create({
  _useS3MultipartUploads() {
@@ -21,6 +22,20 @@ export default Mixin.create({
      limit: 10,
      retryDelays: RETRY_DELAYS,
      // When we get to really big files, it's better not to have thousands
      // of small chunks, since we don't have resume functionality if the
      // upload fails. It's better to try to upload fewer chunks, even if
      // those chunks are bigger.
      getChunkSize(file) {
        if (file.size >= 500 * MB) {
          return 20 * MB;
        } else if (file.size >= 100 * MB) {
          return 10 * MB;
        } else {
          return 5 * MB;
        }
      },
      createMultipartUpload: this._createMultipartUpload,
      prepareUploadParts: this._prepareUploadParts,
      completeMultipartUpload: this._completeMultipartUpload,
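
For context, here is a minimal sketch of how a getChunkSize option like the
one in this diff is passed to the @uppy/aws-s3-multipart plugin. The
standalone Uppy setup below is illustrative only; the Discourse mixin passes
these options alongside its own endpoint handlers (createMultipartUpload and
friends, shown above).

import Uppy from "@uppy/core";
import AwsS3Multipart from "@uppy/aws-s3-multipart";

const MB = 1024 * 1024;

const uppy = new Uppy();
uppy.use(AwsS3Multipart, {
  limit: 10,
  retryDelays: [0, 1000, 3000, 5000],
  // Scale the part size with the file size so very large uploads don't
  // produce thousands of tiny parts.
  getChunkSize(file) {
    if (file.size >= 500 * MB) {
      return 20 * MB;
    } else if (file.size >= 100 * MB) {
      return 10 * MB;
    }
    return 5 * MB;
  },
  // The mixin also supplies createMultipartUpload, prepareUploadParts, and
  // completeMultipartUpload handlers pointing at Discourse's own endpoints
  // (omitted here).
});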