DEV: Upgrade Uppy to v4 (#29397)

Key changes include:

- `@uppy/aws-s3-multipart` is now part of `@uppy/aws-s3`, and controlled with a boolean

- Some minor changes/renames to Uppy APIs

- Uppy has removed batch signing from their S3 multipart implementation. This commit implements a batching system outside of Uppy to avoid needing one-signing-request-per-part

- Reduces concurrent part uploads to 6, because S3 uses HTTP/1.1 and browsers limit concurrent connections to 6 per host.

- Upstream drop-target implementation has changed slightly, so we now need `pointer-events: none` on the hover element
This commit is contained in:
David Taylor 2024-10-28 14:01:44 +00:00 committed by GitHub
parent 27c20eeacd
commit aa89acbda6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 515 additions and 1034 deletions

View File

@ -16,12 +16,6 @@
"dependencies": {
"@babel/core": "^7.26.0",
"@ember/string": "^4.0.0",
"@uppy/aws-s3": "3.0.6",
"@uppy/aws-s3-multipart": "3.1.3",
"@uppy/core": "3.0.4",
"@uppy/drop-target": "2.0.1",
"@uppy/utils": "5.4.3",
"@uppy/xhr-upload": "3.1.1",
"discourse-i18n": "workspace:1.0.0",
"ember-auto-import": "^2.8.1",
"ember-cli-babel": "^8.2.0",

View File

@ -342,10 +342,18 @@ export function displayErrorForUpload(data, siteSettings, fileName) {
if (didError) {
return;
}
} else if (data.body && data.status) {
} else if (data.responseText && data.status) {
let parsedBody = data.responseText;
if (typeof parsedBody === "string") {
try {
parsedBody = JSON.parse(parsedBody);
} catch (e) {
// ignore
}
}
const didError = displayErrorByResponseStatus(
data.status,
data.body,
parsedBody,
fileName,
siteSettings
);

View File

@ -117,7 +117,7 @@ export default class UppyComposerUpload {
this.#reset();
if (this.uppyWrapper.uppyInstance) {
this.uppyWrapper.uppyInstance.close();
this.uppyWrapper.uppyInstance.destroy();
this.uppyWrapper.uppyInstance = null;
}
@ -311,13 +311,9 @@ export default class UppyComposerUpload {
});
});
this.uppyWrapper.uppyInstance.on("upload", (data) => {
this.uppyWrapper.uppyInstance.on("upload", (uploadId, files) => {
run(() => {
this.uppyWrapper.addNeedProcessing(data.fileIDs.length);
const files = data.fileIDs.map((fileId) =>
this.uppyWrapper.uppyInstance.getFile(fileId)
);
this.uppyWrapper.addNeedProcessing(files.length);
this.composer.setProperties({
isProcessingUpload: true,
@ -605,6 +601,7 @@ export default class UppyComposerUpload {
#useXHRUploads() {
this.uppyWrapper.uppyInstance.use(XHRUpload, {
endpoint: getURL(`/uploads.json?client_id=${this.messageBus.clientId}`),
shouldRetry: () => false,
headers: () => ({
"X-CSRF-Token": this.session.csrfToken,
}),
@ -627,7 +624,7 @@ export default class UppyComposerUpload {
}
#resetUpload(file, opts) {
if (opts.removePlaceholder) {
if (opts.removePlaceholder && this.#placeholders[file.id]) {
this.appEvents.trigger(
`${this.composerEventPrefix}:replace-text`,
this.#placeholders[file.id].uploadPlaceholder,

View File

@ -1,12 +1,15 @@
import { setOwner } from "@ember/owner";
import { debounce } from "@ember/runloop";
import { service } from "@ember/service";
import AwsS3Multipart from "@uppy/aws-s3-multipart";
import AwsS3 from "@uppy/aws-s3";
import { Promise } from "rsvp";
import { ajax } from "discourse/lib/ajax";
const RETRY_DELAYS = [0, 1000, 3000, 5000];
const MB = 1024 * 1024;
const s3MultipartMeta = new WeakMap(); // file -> { attempts: { partNumber -> attempts }, signingErrorRaised: boolean, batchSigner: BatchSigner }
export default class UppyS3Multipart {
@service siteSettings;
@ -20,15 +23,17 @@ export default class UppyS3Multipart {
apply(uppyInstance) {
this.uppyInstance = uppyInstance;
this.uppyInstance.use(AwsS3Multipart, {
// controls how many simultaneous _chunks_ are uploaded, not files,
// which in turn controls the minimum number of chunks presigned
// in each batch (limit / 2)
//
// the default, and minimum, chunk size is 5mb. we can control the
// chunk size via getChunkSize(file), so we may want to increase
// the chunk size for larger files
limit: 10,
this.uppyInstance.use(AwsS3, {
// TODO: using multipart even for tiny files is not ideal. Now that uppy
// made multipart a simple boolean, rather than a separate plugin, we can
// consider combining our two S3 implementations and choose the strategy
// based on file size.
shouldUseMultipart: true,
// Number of concurrent part uploads. AWS uses http/1.1,
// which browsers limit to 6 concurrent connections per host.
limit: 6,
retryDelays: RETRY_DELAYS,
// When we get to really big files, it's better to not have thousands
@ -46,9 +51,9 @@ export default class UppyS3Multipart {
},
createMultipartUpload: this.#createMultipartUpload.bind(this),
prepareUploadParts: this.#prepareUploadParts.bind(this),
completeMultipartUpload: this.#completeMultipartUpload.bind(this),
abortMultipartUpload: this.#abortMultipartUpload.bind(this),
signPart: this.#signPart.bind(this),
// we will need a listParts function at some point when we want to
// resume multipart uploads; this is used by uppy to figure out
@ -89,54 +94,56 @@ export default class UppyS3Multipart {
});
}
#prepareUploadParts(file, partData) {
if (file.preparePartsRetryAttempts === undefined) {
file.preparePartsRetryAttempts = 0;
#getFileMeta(file) {
if (s3MultipartMeta.has(file)) {
return s3MultipartMeta.get(file);
}
return ajax(`${this.uploadRootPath}/batch-presign-multipart-parts.json`, {
type: "POST",
data: {
part_numbers: partData.parts.map((part) => part.number),
unique_identifier: file.meta.unique_identifier,
},
})
.then((data) => {
if (file.preparePartsRetryAttempts) {
delete file.preparePartsRetryAttempts;
this.uppyWrapper.debug.log(
`[uppy] Retrying batch fetch for ${file.id} was successful, continuing.`
);
}
return { presignedUrls: data.presigned_urls };
})
.catch((err) => {
const status = err.jqXHR.status;
// it is kind of ugly to have to track the retry attempts for
// the file based on the retry delays, but uppy's `retryable`
// function expects the rejected Promise data to be structured
// _just so_, and provides no interface for us to tell how many
// times the upload has been retried (which it tracks internally)
//
// if we exceed the attempts then there is no way that uppy will
// retry the upload once again, so in that case the alert can
// be safely shown to the user that their upload has failed.
if (file.preparePartsRetryAttempts < RETRY_DELAYS.length) {
file.preparePartsRetryAttempts += 1;
const attemptsLeft =
RETRY_DELAYS.length - file.preparePartsRetryAttempts + 1;
this.uppyWrapper.debug.log(
`[uppy] Fetching a batch of upload part URLs for ${file.id} failed with status ${status}, retrying ${attemptsLeft} more times...`
);
return Promise.reject({ source: { status } });
} else {
this.uppyWrapper.debug.log(
`[uppy] Fetching a batch of upload part URLs for ${file.id} failed too many times, throwing error.`
);
// uppy is inconsistent, an error here does not fire the upload-error event
this.handleUploadError(file, err);
}
});
const fileMeta = {
attempts: {},
signingErrorRaised: false,
batchSigner: new BatchSigner({
file,
uploadRootPath: this.uploadRootPath,
}),
};
s3MultipartMeta.set(file, fileMeta);
return fileMeta;
}
async #signPart(file, partData) {
const fileMeta = this.#getFileMeta(file);
fileMeta.attempts[partData.partNumber] ??= 0;
const thisPartAttempts = (fileMeta.attempts[partData.partNumber] += 1);
this.uppyWrapper.debug.log(
`[uppy] requesting signature for part ${partData.partNumber} (attempt ${thisPartAttempts})`
);
try {
const url = await fileMeta.batchSigner.signedUrlFor(partData);
this.uppyWrapper.debug.log(
`[uppy] signature for part ${partData.partNumber} obtained, continuing.`
);
return { url };
} catch (err) {
// Uppy doesn't properly bubble errors from failed #signPart, so we call
// the error handler ourselves after the last failed attempt
if (
!fileMeta.signingErrorRaised &&
thisPartAttempts >= RETRY_DELAYS.length
) {
this.uppyWrapper.debug.log(
`[uppy] Fetching a signed part URL for ${file.id} failed too many times, raising error.`
);
// uppy is inconsistent, an error here does not fire the upload-error event
this.handleUploadError(file, err);
fileMeta.signingErrorRaised = true;
}
throw err;
}
}
#completeMultipartUpload(file, data) {
@ -193,3 +200,78 @@ export default class UppyS3Multipart {
});
}
}
const BATCH_SIGNER_INITIAL_DEBOUNCE = 50;
const BATCH_SIGNER_REGULAR_DEBOUNCE = 500;

/**
 * This class is responsible for batching requests to the server to sign
 * parts of a multipart upload. It is used to avoid making a request for
 * every single part, which would likely hit our rate limits.
 */
class BatchSigner {
  pendingRequests = [];
  #madeFirstRequest = false;

  constructor({ file, uploadRootPath }) {
    this.file = file;
    this.uploadRootPath = uploadRootPath;
  }

  /**
   * Queue a signature request for one part.
   *
   * @param {object} partData - Uppy part descriptor with `partNumber`
   * @returns {Promise<string>} resolves with the presigned URL for the part
   *   once the batched server response arrives; rejects if the batch fails
   */
  signedUrlFor(partData) {
    let entry;
    const promise = new Promise((resolve, reject) => {
      entry = { partData, resolve, reject };
      this.pendingRequests.push(entry);
    });
    this.#scheduleSigning();
    return promise;
  }

  #scheduleSigning() {
    // Short debounce for the very first batch so the upload can start
    // quickly; a longer one afterwards so later batches coalesce more parts.
    const wait = this.#madeFirstRequest
      ? BATCH_SIGNER_REGULAR_DEBOUNCE
      : BATCH_SIGNER_INITIAL_DEBOUNCE;
    debounce(this, this.#signParts, wait);
  }

  // Send every currently-queued part number to the server in one request,
  // then settle each queued promise from the response.
  async #signParts() {
    const batch = this.pendingRequests;
    if (batch.length === 0) {
      return;
    }

    this.#madeFirstRequest = true;
    this.pendingRequests = [];

    try {
      const result = await ajax(
        `${this.uploadRootPath}/batch-presign-multipart-parts.json`,
        {
          type: "POST",
          data: {
            part_numbers: batch.map(({ partData }) => partData.partNumber),
            unique_identifier: this.file.meta.unique_identifier,
          },
        }
      );
      batch.forEach(({ partData, resolve }) => {
        resolve(result.presigned_urls[partData.partNumber.toString()]);
      });
    } catch (err) {
      // eslint-disable-next-line no-console
      console.error("[uppy] failed to get part signatures", err);
      batch.forEach(({ reject }) => reject(err));
    }
  }
}

View File

@ -70,10 +70,8 @@ export default class UppyUploadDebugging {
return;
}
uppy.on("upload", (data) => {
data.fileIDs.forEach((fileId) =>
this.#performanceMark(`upload-${fileId}-start`)
);
uppy.on("upload", (uploadID, files) => {
files.forEach(({ id }) => this.#performanceMark(`upload-${id}-start`));
});
uppy.on("create-multipart", (fileId) => {

View File

@ -210,11 +210,8 @@ export default class UppyUpload {
this.uploadProgress = progress;
});
this.uppyWrapper.uppyInstance.on("upload", (data) => {
this.uppyWrapper.addNeedProcessing(data.fileIDs.length);
const files = data.fileIDs.map((fileId) =>
this.uppyWrapper.uppyInstance.getFile(fileId)
);
this.uppyWrapper.uppyInstance.on("upload", (uploadId, files) => {
this.uppyWrapper.addNeedProcessing(files.length);
this.processing = true;
this.cancellable = false;
files.forEach((file) => {
@ -287,6 +284,9 @@ export default class UppyUpload {
this.uppyWrapper.uppyInstance.on(
"upload-error",
(file, error, response) => {
if (response.aborted) {
return; // User cancelled the upload
}
this.#removeInProgressUpload(file.id);
displayErrorForUpload(response || error, this.siteSettings, file.name);
this.#reset();
@ -402,6 +402,7 @@ export default class UppyUpload {
#useXHRUploads() {
this.uppyWrapper.uppyInstance.use(XHRUpload, {
endpoint: this.#xhrUploadUrl(),
shouldRetry: () => false,
headers: () => ({
"X-CSRF-Token": this.session.csrfToken,
}),
@ -420,6 +421,7 @@ export default class UppyUpload {
#useS3Uploads() {
this.#usingS3Uploads = true;
this.uppyWrapper.uppyInstance.use(AwsS3, {
shouldUseMultipart: false,
getUploadParameters: (file) => {
const data = {
file_name: file.name,

View File

@ -60,12 +60,11 @@
"@types/jquery": "^3.5.32",
"@types/qunit": "^2.19.11",
"@types/rsvp": "^4.0.9",
"@uppy/aws-s3": "3.0.6",
"@uppy/aws-s3-multipart": "3.1.3",
"@uppy/core": "3.0.4",
"@uppy/drop-target": "2.0.1",
"@uppy/utils": "5.4.3",
"@uppy/xhr-upload": "3.1.1",
"@uppy/aws-s3": "^4.1.0",
"@uppy/core": "^4.2.2",
"@uppy/drop-target": "3.0.1",
"@uppy/utils": "^6.0.3",
"@uppy/xhr-upload": "^4.2.1",
"a11y-dialog": "8.1.1",
"admin": "workspace:1.0.0",
"autosize": "^6.0.1",

View File

@ -498,14 +498,6 @@ acceptance("Uppy Composer Attachment - Upload Error", function (needs) {
});
test("should show an error message for the failed upload", async function (assert) {
// Don't log the upload error
const stub = sinon
.stub(console, "error")
.withArgs(
sinon.match(/\[Uppy\]/),
sinon.match(/Failed to upload avatar\.png/)
);
await visit("/");
await click("#create-topic");
await fillIn(".d-editor-input", "The image:\n");
@ -513,7 +505,6 @@ acceptance("Uppy Composer Attachment - Upload Error", function (needs) {
const done = assert.async();
appEvents.on("composer:upload-error", async () => {
sinon.assert.calledOnce(stub);
await settled();
assert.strictEqual(
query(".dialog-body").textContent.trim(),

View File

@ -444,7 +444,7 @@ module("Unit | Utility | uploads", function (hooks) {
displayErrorForUpload(
{
status: 422,
body: { message: "upload failed" },
responseText: JSON.stringify({ message: "upload failed" }),
},
"test.png",
{ max_attachment_size_kb: 1024, max_image_size_kb: 1024 }

View File

@ -11,6 +11,7 @@
}
.uppy-is-drag-over {
box-shadow: 0 0px 52px 0 #ffffff, 0px 7px 33px 0 var(--tertiary-low);
pointer-events: none;
}
#custom_emoji.highlighted {
background: var(--tertiary-very-low);

View File

@ -26,6 +26,7 @@ html.ios-device.keyboard-visible body #main-outlet .full-page-chat {
left: 0;
background-color: rgba(0, 0, 0, 0.75);
z-index: z("header");
pointer-events: none;
&-content {
width: max-content;
display: flex;

View File

@ -10,6 +10,7 @@
justify-content: center;
display: flex;
background: rgba(var(--always-black-rgb), 0.85);
pointer-events: none;
.uppy-is-drag-over & {
visibility: visible;

View File

@ -120,9 +120,8 @@ module("Discourse Chat | Component | chat-composer-uploads", function (hooks) {
this.appEvents.on(
`upload-mixin:chat-composer-uploader:upload-cancelled`,
(fileId) => {
assert.strictEqual(
fileId.includes("uppy-avatar/"),
true,
assert.true(
fileId.includes("chat-composer-uploader-avatar/"),
"upload was cancelled"
);
done();

File diff suppressed because it is too large Load Diff