# frozen_string_literal: true
RSpec.describe UploadsController do
  fab!(:user) { Fabricate(:user, refresh_auto_groups: true) }
describe "#create" do
|
2013-09-06 13:18:42 -04:00
|
|
|
it "requires you to be logged in" do
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json"
|
2018-01-11 22:15:10 -05:00
|
|
|
expect(response.status).to eq(403)
|
2013-04-02 19:17:17 -04:00
|
|
|
end
|
|
|
|
|
2022-07-27 12:14:14 -04:00
|
|
|
context "when logged in" do
|
2019-05-28 21:00:25 -04:00
|
|
|
before { sign_in(user) }
|
2013-04-02 19:17:17 -04:00
|
|
|
|
2020-05-18 19:09:36 -04:00
|
|
|
let(:logo_file) { file_from_fixtures("logo.png") }
|
|
|
|
let(:logo_filename) { File.basename(logo_file) }
|
2013-04-02 19:17:17 -04:00
|
|
|
|
2020-07-03 13:16:54 -04:00
|
|
|
let(:logo) { Rack::Test::UploadedFile.new(logo_file) }
|
|
|
|
let(:fake_jpg) { Rack::Test::UploadedFile.new(file_from_fixtures("fake.jpg")) }
|
|
|
|
let(:text_file) { Rack::Test::UploadedFile.new(File.new("#{Rails.root}/LICENSE.txt")) }
|
2013-04-02 19:17:17 -04:00
|
|
|
|
2021-07-12 22:22:00 -04:00
|
|
|
it "expects a type or upload_type" do
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: logo }
|
|
|
|
expect(response.status).to eq(400)
|
2021-07-12 22:22:00 -04:00
|
|
|
post "/uploads.json",
|
|
|
|
params: {
|
|
|
|
file: Rack::Test::UploadedFile.new(logo_file),
|
|
|
|
type: "avatar",
|
|
|
|
}
|
|
|
|
expect(response.status).to eq 200
|
|
|
|
post "/uploads.json",
|
|
|
|
params: {
|
|
|
|
file: Rack::Test::UploadedFile.new(logo_file),
|
|
|
|
upload_type: "avatar",
|
|
|
|
}
|
|
|
|
expect(response.status).to eq 200
|
2017-08-22 16:40:01 -04:00
|
|
|
end
|
|
|
|
|
2015-05-19 19:39:58 -04:00
|
|
|
it "is successful with an image" do
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: logo, type: "avatar" }
|
2015-05-19 19:39:58 -04:00
|
|
|
expect(response.status).to eq 200
|
2020-05-07 11:04:12 -04:00
|
|
|
expect(response.parsed_body["id"]).to be_present
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(1)
|
2015-05-19 19:39:58 -04:00
|
|
|
end
|
2014-04-29 13:12:35 -04:00
|
|
|
|
2020-07-03 07:23:10 -04:00
|
|
|
it 'returns "raw" url for site settings' do
|
|
|
|
set_cdn_url "https://awesome.com"
|
2020-07-03 13:16:54 -04:00
|
|
|
|
|
|
|
upload = UploadCreator.new(logo_file, "logo.png").create_for(-1)
|
|
|
|
logo = Rack::Test::UploadedFile.new(file_from_fixtures("logo.png"))
|
|
|
|
|
2020-07-03 07:23:10 -04:00
|
|
|
post "/uploads.json", params: { file: logo, type: "site_setting", for_site_setting: "true" }
|
|
|
|
expect(response.status).to eq 200
|
2020-07-03 13:16:54 -04:00
|
|
|
expect(response.parsed_body["url"]).to eq(upload.url)
|
2020-07-03 07:23:10 -04:00
|
|
|
end
|
|
|
|
|
2020-07-01 21:06:14 -04:00
|
|
|
it "returns cdn url" do
|
|
|
|
set_cdn_url "https://awesome.com"
|
|
|
|
post "/uploads.json", params: { file: logo, type: "composer" }
|
|
|
|
expect(response.status).to eq 200
|
|
|
|
expect(response.parsed_body["url"]).to start_with("https://awesome.com/uploads/default/")
|
|
|
|
end
|
|
|
|
|
2015-05-19 19:39:58 -04:00
|
|
|
it "is successful with an attachment" do
|
2017-06-12 16:41:29 -04:00
|
|
|
SiteSetting.authorized_extensions = "*"
|
2015-05-25 11:59:00 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: text_file, type: "composer" }
|
2015-05-19 19:39:58 -04:00
|
|
|
expect(response.status).to eq 200
|
2018-06-03 23:13:52 -04:00
|
|
|
|
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(0)
|
2020-05-07 11:04:12 -04:00
|
|
|
id = response.parsed_body["id"]
|
2017-11-26 20:43:18 -05:00
|
|
|
expect(id).to be
|
2015-06-21 07:52:52 -04:00
|
|
|
end
|
|
|
|
|
2018-02-24 06:35:57 -05:00
|
|
|
it "is successful with api" do
|
2017-04-15 00:11:02 -04:00
|
|
|
SiteSetting.authorized_extensions = "*"
|
2018-06-03 23:13:52 -04:00
|
|
|
api_key = Fabricate(:api_key, user: user).key
|
2017-05-26 03:19:09 -04:00
|
|
|
|
2018-02-24 06:35:57 -05:00
|
|
|
url = "http://example.com/image.png"
|
|
|
|
png = File.read(Rails.root + "spec/fixtures/images/logo.png")
|
|
|
|
|
|
|
|
stub_request(:get, url).to_return(status: 200, body: png)
|
2015-06-21 07:52:52 -04:00
|
|
|
|
2020-04-06 18:55:44 -04:00
|
|
|
post "/uploads.json",
|
|
|
|
params: {
|
|
|
|
url: url,
|
|
|
|
type: "avatar",
|
|
|
|
},
|
|
|
|
headers: {
|
|
|
|
HTTP_API_KEY: api_key,
|
|
|
|
HTTP_API_USERNAME: user.username.downcase,
|
|
|
|
}
|
2015-06-21 07:52:52 -04:00
|
|
|
|
2020-05-07 11:04:12 -04:00
|
|
|
json = response.parsed_body
|
2015-06-21 07:52:52 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(response.status).to eq(200)
|
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(1)
|
|
|
|
expect(json["id"]).to be_present
|
2017-08-22 16:40:01 -04:00
|
|
|
expect(json["short_url"]).to eq("upload://qUm0DGR49PAZshIi7HxMd3cAlzn.png")
|
2015-05-19 19:39:58 -04:00
|
|
|
end
|
2014-04-29 13:12:35 -04:00
|
|
|
|
2015-05-19 19:39:58 -04:00
|
|
|
it "correctly sets retain_hours for admins" do
|
2018-06-03 23:13:52 -04:00
|
|
|
sign_in(Fabricate(:admin))
|
2014-04-29 13:12:35 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: logo, retain_hours: 100, type: "profile_background" }
|
2014-04-29 13:12:35 -04:00
|
|
|
|
2020-05-07 11:04:12 -04:00
|
|
|
id = response.parsed_body["id"]
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(0)
|
2015-05-19 19:39:58 -04:00
|
|
|
expect(Upload.find(id).retain_hours).to eq(100)
|
2013-06-15 03:54:49 -04:00
|
|
|
end
|
2013-04-02 19:17:17 -04:00
|
|
|
|
2015-08-18 05:39:51 -04:00
|
|
|
it "requires a file" do
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { type: "composer" }
|
2015-08-18 05:39:51 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(0)
|
2020-05-07 11:04:12 -04:00
|
|
|
message = response.parsed_body
|
2017-11-26 20:43:18 -05:00
|
|
|
expect(response.status).to eq 422
|
|
|
|
expect(message["errors"]).to contain_exactly(I18n.t("upload.file_missing"))
|
2015-08-18 05:39:51 -04:00
|
|
|
end
|
|
|
|
|
2015-05-19 19:39:58 -04:00
|
|
|
it "properly returns errors" do
|
2018-06-03 23:13:52 -04:00
|
|
|
SiteSetting.authorized_extensions = "*"
|
2017-06-12 16:41:29 -04:00
|
|
|
SiteSetting.max_attachment_size_kb = 1
|
2013-04-02 19:17:17 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: text_file, type: "avatar" }
|
2015-05-25 11:59:00 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(response.status).to eq(422)
|
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(0)
|
2020-05-07 11:04:12 -04:00
|
|
|
errors = response.parsed_body["errors"]
|
FEATURE: Humanize file size error messages (#14398)
The file size error messages for max_image_size_kb and
max_attachment_size_kb are shown to the user in the KB
format, regardless of how large the limit is. Since we
are going to support uploading much larger files soon,
this KB-based limit soon becomes unfriendly to the end
user.
For example, if the max attachment size is set to 512000
KB, this is what the user sees:
> Sorry, the file you are trying to upload is too big (maximum
size is 512000KB)
This makes the user do math. In almost all file explorers that
a regular user would be familiar width, the file size is shown
in a format based on the maximum increment (e.g. KB, MB, GB).
This commit changes the behaviour to output a humanized file size
instead of the raw KB. For the above example, it would now say:
> Sorry, the file you are trying to upload is too big (maximum
size is 512 MB)
This humanization also handles decimals, e.g. 1536KB = 1.5 MB
2021-09-21 17:59:45 -04:00
|
|
|
expect(errors.first).to eq(
|
|
|
|
I18n.t("upload.attachments.too_large_humanized", max_size: "1 KB"),
|
|
|
|
)
|
2013-04-02 19:17:17 -04:00
|
|
|
end
|
|
|
|
|
2023-12-12 18:53:19 -05:00
|
|
|
it "ensures user belongs to uploaded_avatars_allowed_groups when uploading an avatar" do
|
|
|
|
SiteSetting.uploaded_avatars_allowed_groups = "13"
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: logo, type: "avatar" }
|
|
|
|
expect(response.status).to eq(422)
|
2023-12-12 18:53:19 -05:00
|
|
|
|
2024-01-25 01:28:26 -05:00
|
|
|
user.change_trust_level!(TrustLevel[3])
|
|
|
|
|
2023-12-12 18:53:19 -05:00
|
|
|
post "/uploads.json", params: { file: logo, type: "avatar" }
|
|
|
|
expect(response.status).to eq(200)
|
2015-11-12 04:26:45 -05:00
|
|
|
end
|
|
|
|
|
2021-02-08 05:04:33 -05:00
|
|
|
it "ensures discourse_connect_overrides_avatar is not enabled when uploading an avatar" do
|
|
|
|
SiteSetting.discourse_connect_overrides_avatar = true
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: logo, type: "avatar" }
|
|
|
|
expect(response.status).to eq(422)
|
2015-11-12 04:26:45 -05:00
|
|
|
end
|
|
|
|
|
2017-06-12 16:41:29 -04:00
|
|
|
it "allows staff to upload any file in PM" do
|
|
|
|
SiteSetting.authorized_extensions = "jpg"
|
|
|
|
SiteSetting.allow_staff_to_upload_any_file_in_pm = true
|
2018-06-03 23:13:52 -04:00
|
|
|
user.update_columns(moderator: true)
|
2017-06-12 16:41:29 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json",
|
|
|
|
params: {
|
2017-11-26 20:43:18 -05:00
|
|
|
file: text_file,
|
|
|
|
type: "composer",
|
|
|
|
for_private_message: "true",
|
|
|
|
}
|
2017-06-12 16:41:29 -04:00
|
|
|
|
2018-06-07 04:11:09 -04:00
|
|
|
expect(response.status).to eq(200)
|
2020-05-07 11:04:12 -04:00
|
|
|
id = response.parsed_body["id"]
|
2018-11-14 02:03:02 -05:00
|
|
|
expect(Upload.last.id).to eq(id)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "allows staff to upload supported images for site settings" do
|
|
|
|
SiteSetting.authorized_extensions = ""
|
|
|
|
user.update!(admin: true)
|
|
|
|
|
|
|
|
post "/uploads.json", params: { file: logo, type: "site_setting", for_site_setting: "true" }
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
2020-05-07 11:04:12 -04:00
|
|
|
id = response.parsed_body["id"]
|
2018-11-14 02:03:02 -05:00
|
|
|
|
|
|
|
upload = Upload.last
|
|
|
|
|
|
|
|
expect(upload.id).to eq(id)
|
2020-05-18 19:09:36 -04:00
|
|
|
expect(upload.original_filename).to eq(logo_filename)
|
2017-06-12 16:41:29 -04:00
|
|
|
end
|
|
|
|
|
2018-02-19 04:44:24 -05:00
|
|
|
it "respects `authorized_extensions_for_staff` setting when staff upload file" do
|
|
|
|
SiteSetting.authorized_extensions = ""
|
|
|
|
SiteSetting.authorized_extensions_for_staff = "*"
|
2018-06-03 23:13:52 -04:00
|
|
|
user.update_columns(moderator: true)
|
2018-02-19 04:44:24 -05:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: text_file, type: "composer" }
|
2018-02-19 04:44:24 -05:00
|
|
|
|
2018-06-07 04:11:09 -04:00
|
|
|
expect(response.status).to eq(200)
|
2020-05-07 11:04:12 -04:00
|
|
|
data = response.parsed_body
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(data["id"]).to be_present
|
2018-02-19 04:44:24 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
it "ignores `authorized_extensions_for_staff` setting when non-staff upload file" do
|
|
|
|
SiteSetting.authorized_extensions = ""
|
|
|
|
SiteSetting.authorized_extensions_for_staff = "*"
|
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: text_file, type: "composer" }
|
2018-02-19 04:44:24 -05:00
|
|
|
|
2020-05-07 11:04:12 -04:00
|
|
|
data = response.parsed_body
|
2018-02-19 04:44:24 -05:00
|
|
|
expect(data["errors"].first).to eq(I18n.t("upload.unauthorized", authorized_extensions: ""))
|
|
|
|
end
|
|
|
|
|
2015-12-21 10:08:14 -05:00
|
|
|
it "returns an error when it could not determine the dimensions of an image" do
|
2018-06-03 23:13:52 -04:00
|
|
|
post "/uploads.json", params: { file: fake_jpg, type: "composer" }
|
2015-12-21 10:08:14 -05:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(response.status).to eq(422)
|
|
|
|
expect(Jobs::CreateAvatarThumbnails.jobs.size).to eq(0)
|
2020-05-07 11:04:12 -04:00
|
|
|
message = response.parsed_body["errors"]
|
2017-11-26 20:43:18 -05:00
|
|
|
expect(message).to contain_exactly(I18n.t("upload.images.size_not_found"))
|
2015-12-21 10:08:14 -05:00
|
|
|
end
|
2013-04-02 19:17:17 -04:00
|
|
|
end
|
|
|
|
end
def upload_file(file, folder = "images")
|
|
|
|
fake_logo = Rack::Test::UploadedFile.new(file_from_fixtures(file, folder))
|
|
|
|
SiteSetting.authorized_extensions = "*"
|
|
|
|
sign_in(user)
|
2018-06-03 23:13:52 -04:00
|
|
|
|
2019-05-28 21:00:25 -04:00
|
|
|
post "/uploads.json", params: { file: fake_logo, type: "composer" }
|
2018-06-03 23:13:52 -04:00
|
|
|
|
2019-05-28 21:00:25 -04:00
|
|
|
expect(response.status).to eq(200)
|
2019-05-14 20:42:17 -04:00
|
|
|
|
2020-05-07 11:04:12 -04:00
|
|
|
url = response.parsed_body["url"]
|
2019-05-28 21:00:25 -04:00
|
|
|
upload = Upload.get_from_url(url)
|
|
|
|
upload
|
|
|
|
end
describe "#show" do
|
|
|
|
let(:site) { "default" }
|
|
|
|
let(:sha) { Digest::SHA1.hexdigest("discourse") }
|
2015-05-19 06:31:12 -04:00
|
|
|
|
2019-05-14 16:36:54 -04:00
|
|
|
context "when using external storage" do
|
2019-05-14 20:42:17 -04:00
|
|
|
fab!(:upload) { upload_file("small.pdf", "pdf") }
|
|
|
|
|
2020-09-14 07:32:25 -04:00
|
|
|
before { setup_s3 }
|
2015-05-19 06:31:12 -04:00
|
|
|
|
2019-05-14 20:42:17 -04:00
|
|
|
it "returns 404 " do
|
|
|
|
upload = Fabricate(:upload_s3)
|
|
|
|
get "/uploads/#{site}/#{upload.sha1}.#{upload.extension}"
|
|
|
|
|
2019-05-14 16:36:54 -04:00
|
|
|
expect(response.response_code).to eq(404)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns upload if url not migrated" do
|
2019-05-14 20:42:17 -04:00
|
|
|
get "/uploads/#{site}/#{upload.sha1}.#{upload.extension}"
|
|
|
|
|
2019-05-14 16:36:54 -04:00
|
|
|
expect(response.status).to eq(200)
|
|
|
|
end
|
2013-09-06 13:18:42 -04:00
|
|
|
end
|
|
|
|
|
2016-12-19 13:39:04 -05:00
|
|
|
it "returns 404 when the upload doesn't exist" do
|
2018-06-03 23:13:52 -04:00
|
|
|
get "/uploads/#{site}/#{sha}.pdf"
|
|
|
|
expect(response.status).to eq(404)
|
2013-09-06 13:18:42 -04:00
|
|
|
end
|
|
|
|
|
2019-01-29 16:47:25 -05:00
|
|
|
it "returns 404 when the path is nil" do
|
|
|
|
upload = upload_file("logo.png")
|
|
|
|
upload.update_column(:url, "invalid-url")
|
|
|
|
|
|
|
|
get "/uploads/#{site}/#{upload.sha1}.#{upload.extension}"
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
|
2013-09-06 13:18:42 -04:00
|
|
|
it "uses send_file" do
|
2018-06-03 23:13:52 -04:00
|
|
|
upload = upload_file("logo.png")
|
|
|
|
get "/uploads/#{site}/#{upload.sha1}.#{upload.extension}"
|
|
|
|
expect(response.status).to eq(200)
|
2019-04-30 02:58:18 -04:00
|
|
|
|
2019-08-07 11:00:43 -04:00
|
|
|
expect(response.headers["Content-Disposition"]).to eq(
|
2020-05-18 19:09:36 -04:00
|
|
|
%Q|attachment; filename="#{upload.original_filename}"; filename*=UTF-8''#{upload.original_filename}|,
|
|
|
|
)
|
2013-09-06 13:18:42 -04:00
|
|
|
end
|
|
|
|
|
2019-10-14 00:40:33 -04:00
|
|
|
it "returns 200 when js file" do
|
2019-10-15 19:39:31 -04:00
|
|
|
ActionDispatch::FileHandler.any_instance.stubs(:match?).returns(false)
|
2019-10-14 00:40:33 -04:00
|
|
|
upload = upload_file("test.js", "themes")
|
|
|
|
get upload.url
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
end
|
|
|
|
|
2018-08-19 22:41:46 -04:00
|
|
|
it "handles image without extension" do
|
2016-12-19 13:39:04 -05:00
|
|
|
SiteSetting.authorized_extensions = "*"
|
2018-06-03 23:13:52 -04:00
|
|
|
upload = upload_file("image_no_extension")
|
2016-12-19 13:39:04 -05:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
get "/uploads/#{site}/#{upload.sha1}.json"
|
|
|
|
expect(response.status).to eq(200)
|
2019-08-07 11:00:43 -04:00
|
|
|
expect(response.headers["Content-Disposition"]).to eq(
|
2020-05-18 19:09:36 -04:00
|
|
|
%Q|attachment; filename="#{upload.original_filename}"; filename*=UTF-8''#{upload.original_filename}|,
|
|
|
|
)
|
2018-08-19 22:41:46 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
it "handles file without extension" do
|
|
|
|
SiteSetting.authorized_extensions = "*"
|
|
|
|
upload = upload_file("not_an_image")
|
|
|
|
|
|
|
|
get "/uploads/#{site}/#{upload.sha1}.json"
|
|
|
|
expect(response.status).to eq(200)
|
2019-08-07 11:00:43 -04:00
|
|
|
expect(response.headers["Content-Disposition"]).to eq(
|
2020-05-18 19:09:36 -04:00
|
|
|
%Q|attachment; filename="#{upload.original_filename}"; filename*=UTF-8''#{upload.original_filename}|,
|
|
|
|
)
|
2016-12-19 13:39:04 -05:00
|
|
|
end
|
|
|
|
|
2022-07-27 12:14:14 -04:00
|
|
|
context "when user is anonymous" do
|
|
|
|
it "returns 404" do
|
2019-04-18 12:58:39 -04:00
|
|
|
upload = upload_file("small.pdf", "pdf")
|
2019-05-28 21:00:25 -04:00
|
|
|
delete "/session/#{user.username}.json"
|
2015-05-19 06:31:12 -04:00
|
|
|
|
2018-06-03 23:13:52 -04:00
|
|
|
SiteSetting.prevent_anons_from_downloading_files = true
|
2019-06-05 23:27:24 -04:00
|
|
|
get "/uploads/#{site}/#{upload.sha1}.#{upload.extension}"
|
2018-06-03 23:13:52 -04:00
|
|
|
expect(response.status).to eq(404)
|
2014-09-09 12:40:11 -04:00
|
|
|
end
|
|
|
|
end
|
2013-09-06 13:18:42 -04:00
|
|
|
end
describe "#show_short" do
|
2019-12-11 08:21:41 -05:00
|
|
|
it "inlines only supported image files" do
|
|
|
|
upload = upload_file("smallest.png")
|
|
|
|
get upload.short_path, params: { inline: true }
|
|
|
|
expect(response.header["Content-Type"]).to eq("image/png")
|
|
|
|
expect(response.header["Content-Disposition"]).to include("inline;")
|
|
|
|
|
|
|
|
upload.update!(original_filename: "test.xml")
|
|
|
|
get upload.short_path, params: { inline: true }
|
|
|
|
expect(response.header["Content-Type"]).to eq("application/xml")
|
|
|
|
expect(response.header["Content-Disposition"]).to include("attachment;")
|
|
|
|
end
|
|
|
|
|
2019-05-28 21:00:25 -04:00
|
|
|
describe "local store" do
|
|
|
|
fab!(:image_upload) { upload_file("smallest.png") }
|
|
|
|
|
|
|
|
it "returns the right response" do
|
|
|
|
get image_upload.short_path
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
|
|
|
|
expect(response.headers["Content-Disposition"]).to include(
|
|
|
|
"attachment; filename=\"#{image_upload.original_filename}\"",
|
|
|
|
)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns the right response when `inline` param is given" do
|
|
|
|
get "#{image_upload.short_path}?inline=1"
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
|
|
|
|
expect(response.headers["Content-Disposition"]).to include(
|
|
|
|
"inline; filename=\"#{image_upload.original_filename}\"",
|
|
|
|
)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns the right response when base62 param is invalid " do
|
|
|
|
get "/uploads/short-url/12345.png"
|
|
|
|
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
|
2020-01-02 23:39:07 -05:00
|
|
|
it "returns uploads with underscore in extension correctly" do
|
|
|
|
fake_upload = upload_file("fake.not_image")
|
|
|
|
get fake_upload.short_path
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
end
|
|
|
|
|
2021-03-17 14:01:29 -04:00
|
|
|
it "returns uploads with a dash and uppercase in extension correctly" do
|
|
|
|
fake_upload = upload_file("fake.long-FileExtension")
|
|
|
|
get fake_upload.short_path
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
end
|
|
|
|
|
2019-05-28 21:00:25 -04:00
|
|
|
it "returns the right response when anon tries to download a file " \
|
2020-03-25 17:16:02 -04:00
|
|
|
"when prevent_anons_from_downloading_files is true" do
|
2019-05-28 21:00:25 -04:00
|
|
|
delete "/session/#{user.username}.json"
|
|
|
|
SiteSetting.prevent_anons_from_downloading_files = true
|
|
|
|
|
|
|
|
get image_upload.short_path
|
|
|
|
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
describe "s3 store" do
|
|
|
|
let(:upload) { Fabricate(:upload_s3) }
|
|
|
|
|
2020-09-14 07:32:25 -04:00
|
|
|
before { setup_s3 }
|
2019-05-28 21:00:25 -04:00
|
|
|
|
|
|
|
it "should redirect to the s3 URL" do
|
|
|
|
get upload.short_path
|
|
|
|
|
|
|
|
expect(response).to redirect_to(upload.url)
|
|
|
|
end
|
2020-01-15 22:50:27 -05:00
|
|
|
|
2022-09-28 19:24:33 -04:00
|
|
|
context "when upload is secure and secure uploads enabled" do
|
2020-01-15 22:50:27 -05:00
|
|
|
before do
|
2022-09-28 19:24:33 -04:00
|
|
|
SiteSetting.secure_uploads = true
|
2020-01-15 22:50:27 -05:00
|
|
|
upload.update(secure: true)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "redirects to the signed_url_for_path" do
|
2020-03-25 17:16:02 -04:00
|
|
|
sign_in(user)
|
2020-03-10 18:22:26 -04:00
|
|
|
freeze_time
|
2020-01-15 22:50:27 -05:00
|
|
|
get upload.short_path
|
|
|
|
|
|
|
|
expect(response).to redirect_to(
|
|
|
|
Discourse.store.signed_url_for_path(Discourse.store.get_path_for_upload(upload)),
|
|
|
|
)
|
2020-09-28 22:12:03 -04:00
|
|
|
expect(response.header["Location"]).not_to include(
|
|
|
|
"response-content-disposition=attachment",
|
|
|
|
)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "respects the force download (dl) param" do
|
|
|
|
sign_in(user)
|
|
|
|
freeze_time
|
|
|
|
get upload.short_path, params: { dl: "1" }
|
|
|
|
expect(response.header["Location"]).to include("response-content-disposition=attachment")
|
2020-01-15 22:50:27 -05:00
|
|
|
end
|
|
|
|
|
2020-05-18 10:00:41 -04:00
|
|
|
it "has the correct caching header" do
|
|
|
|
sign_in(user)
|
|
|
|
get upload.short_path
|
|
|
|
|
2022-05-25 19:53:01 -04:00
|
|
|
expected_max_age =
|
|
|
|
SiteSetting.s3_presigned_get_url_expires_after_seconds -
|
|
|
|
UploadsController::SECURE_REDIRECT_GRACE_SECONDS
|
2020-05-18 10:00:41 -04:00
|
|
|
expect(expected_max_age).to be > 0 # Sanity check that the constants haven't been set to broken values
|
|
|
|
|
|
|
|
expect(response.headers["Cache-Control"]).to eq("max-age=#{expected_max_age}, private")
|
|
|
|
end
|
|
|
|
|
2020-01-15 22:50:27 -05:00
|
|
|
it "raises invalid access if the user cannot access the upload access control post" do
|
2020-03-25 17:16:02 -04:00
|
|
|
sign_in(user)
|
2020-01-15 22:50:27 -05:00
|
|
|
post = Fabricate(:post)
|
|
|
|
post.topic.change_category_to_id(
|
|
|
|
Fabricate(:private_category, group: Fabricate(:group)).id,
|
|
|
|
)
|
|
|
|
upload.update(access_control_post: post)
|
|
|
|
|
|
|
|
get upload.short_path
|
|
|
|
expect(response.code).to eq("403")
|
|
|
|
end
|
|
|
|
end
|
2019-05-28 21:00:25 -04:00
|
|
|
end
|
|
|
|
end
describe "#show_secure" do
|
|
|
|
describe "local store" do
|
|
|
|
fab!(:image_upload) { upload_file("smallest.png") }
|
|
|
|
|
2022-09-28 19:24:33 -04:00
|
|
|
it "does not return secure uploads when using local store" do
|
|
|
|
secure_url = image_upload.url.sub("/uploads", "/secure-uploads")
|
2019-11-17 20:25:42 -05:00
|
|
|
get secure_url
|
|
|
|
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
describe "s3 store" do
|
|
|
|
let(:upload) { Fabricate(:upload_s3) }
|
2022-09-28 19:24:33 -04:00
|
|
|
let(:secure_url) { upload.url.sub(SiteSetting.Upload.absolute_base_url, "/secure-uploads") }
|
2020-01-15 22:50:27 -05:00
|
|
|
|
2019-11-17 20:25:42 -05:00
|
|
|
before do
|
2020-09-14 07:32:25 -04:00
|
|
|
setup_s3
|
2020-03-25 17:16:02 -04:00
|
|
|
SiteSetting.authorized_extensions = "*"
|
2022-09-28 19:24:33 -04:00
|
|
|
SiteSetting.secure_uploads = true
|
2019-11-17 20:25:42 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
it "should return 404 for anonymous requests requests" do
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "should return signed url for legitimate request" do
|
2020-09-14 07:32:25 -04:00
|
|
|
sign_in(user)
|
2019-11-17 20:25:42 -05:00
|
|
|
get secure_url
|
|
|
|
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to match("Amz-Expires")
|
|
|
|
end
|
|
|
|
|
2022-09-28 19:24:33 -04:00
|
|
|
it "should return secure uploads URL when looking up urls" do
|
2019-11-17 20:25:42 -05:00
|
|
|
upload.update_column(:secure, true)
|
|
|
|
sign_in(user)
|
|
|
|
|
|
|
|
post "/uploads/lookup-urls.json", params: { short_urls: [upload.short_url] }
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
|
2020-05-07 11:04:12 -04:00
|
|
|
result = response.parsed_body
|
2022-09-28 19:24:33 -04:00
|
|
|
expect(result[0]["url"]).to match("secure-uploads")
|
2019-11-17 20:25:42 -05:00
|
|
|
end
|
2020-01-06 21:27:24 -05:00
|
|
|
|
2020-01-15 22:50:27 -05:00
|
|
|
context "when the upload cannot be found from the URL" do
|
|
|
|
it "returns a 404" do
|
2020-09-14 07:32:25 -04:00
|
|
|
sign_in(user)
|
2020-01-15 22:50:27 -05:00
|
|
|
upload.update(sha1: "test")
|
|
|
|
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the access_control_post_id has been set for the upload" do
|
|
|
|
let(:post) { Fabricate(:post) }
|
|
|
|
let!(:private_category) { Fabricate(:private_category, group: Fabricate(:group)) }
|
|
|
|
|
|
|
|
before { upload.update(access_control_post_id: post.id) }
|
|
|
|
|
2023-01-09 01:12:10 -05:00
|
|
|
context "when the user is anon" do
|
|
|
|
it "should return signed url for public posts" do
|
2020-01-15 22:50:27 -05:00
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to match("Amz-Expires")
|
|
|
|
end
|
2023-01-09 01:12:10 -05:00
|
|
|
|
|
|
|
it "should return 403 for deleted posts" do
|
|
|
|
post.trash!
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(403)
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the user does not have access to the post via guardian" do
|
|
|
|
before { post.topic.change_category_to_id(private_category.id) }
|
|
|
|
|
|
|
|
it "returns a 403" do
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(403)
|
|
|
|
end
|
|
|
|
end
|
2020-01-15 22:50:27 -05:00
|
|
|
end
|
|
|
|
|
2023-01-09 01:12:10 -05:00
|
|
|
context "when the user is logged in" do
|
|
|
|
before { sign_in(user) }
|
2020-01-15 22:50:27 -05:00
|
|
|
|
2023-01-09 01:12:10 -05:00
|
|
|
context "when the user has access to the post via guardian" do
|
|
|
|
it "should return signed url for legitimate request" do
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to match("Amz-Expires")
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the user does not have access to the post via guardian" do
|
|
|
|
before { post.topic.change_category_to_id(private_category.id) }
|
|
|
|
|
|
|
|
it "returns a 403" do
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(403)
|
|
|
|
end
|
2020-01-15 22:50:27 -05:00
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2020-03-25 17:16:02 -04:00
|
|
|
context "when the upload is an attachment file" do
|
|
|
|
before { upload.update(original_filename: "test.pdf") }
|
|
|
|
it "redirects to the signed_url_for_path" do
|
2020-09-14 07:32:25 -04:00
|
|
|
sign_in(user)
|
2020-03-25 17:16:02 -04:00
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to match("Amz-Expires")
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the user does not have access to the access control post via guardian" do
|
|
|
|
let(:post) { Fabricate(:post) }
|
|
|
|
let!(:private_category) { Fabricate(:private_category, group: Fabricate(:group)) }
|
|
|
|
|
|
|
|
before do
|
|
|
|
post.topic.change_category_to_id(private_category.id)
|
|
|
|
upload.update(access_control_post_id: post.id)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns a 403" do
|
2020-09-14 07:32:25 -04:00
|
|
|
sign_in(user)
|
2020-03-25 17:16:02 -04:00
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(403)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2023-12-07 00:38:38 -05:00
|
|
|
context "when login is required and user is not signed in" do
|
|
|
|
let(:post) { Fabricate(:post) }
|
|
|
|
|
|
|
|
before do
|
|
|
|
SiteSetting.login_required = true
|
|
|
|
upload.update(access_control_post_id: post.id)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns a 403" do
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(403)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2020-03-25 17:16:02 -04:00
|
|
|
context "when the prevent_anons_from_downloading_files setting is enabled and the user is anon" do
|
|
|
|
before { SiteSetting.prevent_anons_from_downloading_files = true }
|
2020-09-14 07:32:25 -04:00
|
|
|
|
2020-03-25 17:16:02 -04:00
|
|
|
it "returns a 404" do
|
|
|
|
delete "/session/#{user.username}.json"
|
|
|
|
get secure_url
|
|
|
|
expect(response.status).to eq(404)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2022-09-28 19:24:33 -04:00
|
|
|
context "when secure uploads is disabled" do
|
|
|
|
before { SiteSetting.secure_uploads = false }
|
2020-01-06 21:27:24 -05:00
|
|
|
|
2020-01-06 23:02:17 -05:00
|
|
|
context "if the upload is secure false, meaning the ACL is probably public" do
|
|
|
|
before { upload.update(secure: false) }
|
2020-01-06 21:27:24 -05:00
|
|
|
|
2020-01-06 23:02:17 -05:00
|
|
|
it "should redirect to the regular show route" do
|
2022-09-28 19:24:33 -04:00
|
|
|
secure_url = upload.url.sub(SiteSetting.Upload.absolute_base_url, "/secure-uploads")
|
2020-01-06 23:02:17 -05:00
|
|
|
sign_in(user)
|
|
|
|
get secure_url
|
|
|
|
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to eq(Discourse.store.cdn_url(upload.url))
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
context "if the upload is secure true, meaning the ACL is probably private" do
|
|
|
|
before { upload.update(secure: true) }
|
|
|
|
|
|
|
|
it "should redirect to the presigned URL still otherwise we will get a 403" do
|
2022-09-28 19:24:33 -04:00
|
|
|
secure_url = upload.url.sub(SiteSetting.Upload.absolute_base_url, "/secure-uploads")
|
2020-01-06 23:02:17 -05:00
|
|
|
sign_in(user)
|
|
|
|
get secure_url
|
|
|
|
|
|
|
|
expect(response.status).to eq(302)
|
|
|
|
expect(response.redirect_url).to match("Amz-Expires")
|
|
|
|
end
|
2020-01-06 21:27:24 -05:00
|
|
|
end
|
|
|
|
end
|
2019-11-17 20:25:42 -05:00
|
|
|
end
|
|
|
|
end
describe "#lookup_urls" do
  it "can look up long urls" do
    sign_in(user)
    record = Fabricate(:upload)

    post "/uploads/lookup-urls.json", params: { short_urls: [record.short_url] }
    expect(response.status).to eq(200)

    body = response.parsed_body
    expect(body[0]["url"]).to eq(record.url)
    expect(body[0]["short_path"]).to eq(record.short_path)
  end

  describe "secure uploads" do
    let(:upload) { Fabricate(:upload_s3, secure: true) }

    before do
      setup_s3
      SiteSetting.authorized_extensions = "pdf|png"
      SiteSetting.secure_uploads = true
    end

    it "returns secure url for a secure uploads upload" do
      sign_in(user)

      post "/uploads/lookup-urls.json", params: { short_urls: [upload.short_url] }
      expect(response.status).to eq(200)

      body = response.parsed_body
      # Secure uploads are served through the /secure-uploads proxy path.
      expect(body[0]["url"]).to match("/secure-uploads")
      expect(body[0]["short_path"]).to eq(upload.short_path)
    end

    it "returns secure urls for non-media uploads" do
      # Force the upload to look like an attachment (pdf) rather than an image.
      upload.update!(original_filename: "not-an-image.pdf", extension: "pdf")
      sign_in(user)

      post "/uploads/lookup-urls.json", params: { short_urls: [upload.short_url] }
      expect(response.status).to eq(200)

      body = response.parsed_body
      expect(body[0]["url"]).to match("/secure-uploads")
      expect(body[0]["short_path"]).to eq(upload.short_path)
    end
  end
end
|
2019-02-20 21:13:37 -05:00
|
|
|
|
|
|
|
describe "#metadata" do
  fab!(:upload)

  describe "when url is missing" do
    it "should return the right response" do
      post "/uploads/lookup-metadata.json"

      expect(response.status).to eq(403)
    end
  end

  describe "when not signed in" do
    it "should return the right response" do
      post "/uploads/lookup-metadata.json", params: { url: upload.url }

      expect(response.status).to eq(403)
    end
  end

  describe "when signed in" do
    before { sign_in(user) }

    describe "when url is invalid" do
      it "should return the right response" do
        post "/uploads/lookup-metadata.json", params: { url: "abc" }

        expect(response.status).to eq(404)
      end
    end

    it "should return the right response" do
      post "/uploads/lookup-metadata.json", params: { url: upload.url }

      expect(response.status).to eq(200)

      body = response.parsed_body
      # The endpoint echoes back the upload's stored metadata.
      expect(body["original_filename"]).to eq(upload.original_filename)
      expect(body["width"]).to eq(upload.width)
      expect(body["height"]).to eq(upload.height)
      expect(body["human_filesize"]).to eq(upload.human_filesize)
    end
  end
end
|
2021-07-27 18:42:25 -04:00
|
|
|
|
|
|
|
describe "#generate_presigned_put" do
  context "when the store is external" do
    before do
      sign_in(user)
      SiteSetting.enable_direct_s3_uploads = true
      setup_s3
    end

    it "errors if the correct params are not provided" do
      # Missing type/upload_type.
      post "/uploads/generate-presigned-put.json", params: { file_name: "test.png" }
      expect(response.status).to eq(400)
      # Missing file_name.
      post "/uploads/generate-presigned-put.json", params: { type: "card_background" }
      expect(response.status).to eq(400)
    end

    it "generates a presigned URL and creates an external upload stub" do
      post "/uploads/generate-presigned-put.json",
           params: {
             file_name: "test.png",
             type: "card_background",
             file_size: 1024,
           }
      expect(response.status).to eq(200)

      body = response.parsed_body

      # A stub must be recorded so the upload can later be promoted/validated.
      stub_scope =
        ExternalUploadStub.where(
          unique_identifier: body["unique_identifier"],
          original_filename: "test.png",
          created_by: user,
          upload_type: "card_background",
          filesize: 1024,
        )
      expect(stub_scope.exists?).to eq(true)
      # The presigned PUT must target the temporary S3 prefix and be time-limited.
      expect(body["key"]).to include(FileStore::S3Store::TEMPORARY_UPLOAD_PREFIX)
      expect(body["url"]).to include(FileStore::S3Store::TEMPORARY_UPLOAD_PREFIX)
      expect(body["url"]).to include("Amz-Expires")
    end

    it "includes accepted metadata in the response when provided" do
      post "/uploads/generate-presigned-put.json",
           params: {
             file_name: "test.png",
             file_size: 1024,
             type: "card_background",
             metadata: {
               "sha1-checksum" => "testing",
               "blah" => "wontbeincluded",
             },
           }
      expect(response.status).to eq(200)

      body = response.parsed_body
      # Metadata goes into signed headers, not the query string; unknown keys are dropped.
      expect(body["url"]).not_to include("&x-amz-meta-sha1-checksum=testing")
      expect(body["url"]).not_to include("&x-amz-meta-blah=wontbeincluded")
      expect(body["signed_headers"]).to eq(
        "x-amz-acl" => "private",
        "x-amz-meta-sha1-checksum" => "testing",
      )
    end

    context "when enable_s3_transfer_acceleration is true" do
      before { SiteSetting.enable_s3_transfer_acceleration = true }

      it "uses the s3-accelerate endpoint for presigned URLs" do
        post "/uploads/generate-presigned-put.json",
             params: {
               file_name: "test.png",
               file_size: 1024,
               type: "card_background",
               metadata: {
                 "sha1-checksum" => "testing",
                 "blah" => "wontbeincluded",
               },
             }
        expect(response.status).to eq(200)

        body = response.parsed_body
        expect(body["url"]).to include("s3-accelerate")
      end
    end

    describe "rate limiting" do
      before { RateLimiter.enable }

      use_redis_snapshotting

      it "rate limits" do
        SiteSetting.max_presigned_put_per_minute = 1

        # Second request exceeds the per-minute limit and must be throttled.
        2.times do
          post "/uploads/generate-presigned-put.json",
               params: {
                 file_name: "test.png",
                 type: "card_background",
                 file_size: 1024,
               }
        end

        expect(response.status).to eq(429)
      end
    end
  end

  context "when the store is not external" do
    before { sign_in(user) }

    it "returns 404" do
      post "/uploads/generate-presigned-put.json",
           params: {
             file_name: "test.png",
             type: "card_background",
             file_size: 1024,
           }
      expect(response.status).to eq(404)
    end
  end
end
|
|
|
|
|
|
|
|
describe "#create_multipart" do
|
|
|
|
context "when the store is external" do
|
|
|
|
let(:mock_multipart_upload_id) do
|
|
|
|
"ibZBv_75gd9r8lH_gqXatLdxMVpAlj6CFTR.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--"
|
2023-01-09 06:18:21 -05:00
|
|
|
end
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
let(:test_bucket_prefix) { "test_#{ENV["TEST_ENV_NUMBER"].presence || "0"}" }
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
|
|
|
|
before do
|
|
|
|
sign_in(user)
|
|
|
|
SiteSetting.enable_direct_s3_uploads = true
|
|
|
|
setup_s3
|
|
|
|
end
|
|
|
|
|
|
|
|
it "errors if the correct params are not provided" do
|
|
|
|
post "/uploads/create-multipart.json", params: { file_name: "test.png" }
|
|
|
|
expect(response.status).to eq(400)
|
|
|
|
post "/uploads/create-multipart.json", params: { upload_type: "composer" }
|
|
|
|
expect(response.status).to eq(400)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns 422 when the create request errors" do
|
|
|
|
FileStore::S3Store
|
|
|
|
.any_instance
|
|
|
|
.stubs(:create_multipart)
|
|
|
|
.raises(Aws::S3::Errors::ServiceError.new({}, "test"))
|
2021-09-27 08:45:05 -04:00
|
|
|
post "/uploads/create-multipart.json",
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
**{ params: { file_name: "test.png", file_size: 1024, upload_type: "composer" } }
|
|
|
|
expect(response.status).to eq(422)
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns 422 when the file is an attachment and it's too big" do
|
FEATURE: Humanize file size error messages (#14398)
The file size error messages for max_image_size_kb and
max_attachment_size_kb are shown to the user in the KB
format, regardless of how large the limit is. Since we
are going to support uploading much larger files soon,
this KB-based limit soon becomes unfriendly to the end
user.
For example, if the max attachment size is set to 512000
KB, this is what the user sees:
> Sorry, the file you are trying to upload is too big (maximum
size is 512000KB)
This makes the user do math. In almost all file explorers that
a regular user would be familiar width, the file size is shown
in a format based on the maximum increment (e.g. KB, MB, GB).
This commit changes the behaviour to output a humanized file size
instead of the raw KB. For the above example, it would now say:
> Sorry, the file you are trying to upload is too big (maximum
size is 512 MB)
This humanization also handles decimals, e.g. 1536KB = 1.5 MB
2021-09-21 17:59:45 -04:00
|
|
|
SiteSetting.max_attachment_size_kb = 1024
|
2021-09-27 08:45:05 -04:00
|
|
|
post "/uploads/create-multipart.json",
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
**{ params: { file_name: "test.zip", file_size: 9_999_999, upload_type: "composer" } }
|
|
|
|
expect(response.status).to eq(422)
|
FEATURE: Humanize file size error messages (#14398)
The file size error messages for max_image_size_kb and
max_attachment_size_kb are shown to the user in the KB
format, regardless of how large the limit is. Since we
are going to support uploading much larger files soon,
this KB-based limit soon becomes unfriendly to the end
user.
For example, if the max attachment size is set to 512000
KB, this is what the user sees:
> Sorry, the file you are trying to upload is too big (maximum
size is 512000KB)
This makes the user do math. In almost all file explorers that
a regular user would be familiar width, the file size is shown
in a format based on the maximum increment (e.g. KB, MB, GB).
This commit changes the behaviour to output a humanized file size
instead of the raw KB. For the above example, it would now say:
> Sorry, the file you are trying to upload is too big (maximum
size is 512 MB)
This humanization also handles decimals, e.g. 1536KB = 1.5 MB
2021-09-21 17:59:45 -04:00
|
|
|
expect(response.body).to include(
|
|
|
|
I18n.t("upload.attachments.too_large_humanized", max_size: "1 MB"),
|
|
|
|
)
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
end
|
|
|
|
|
2023-05-18 04:36:34 -04:00
|
|
|
it "returns 422 when the file is an gif and it's too big, since gifs cannot be resized on client" do
|
|
|
|
SiteSetting.max_image_size_kb = 1024
|
|
|
|
post "/uploads/create-multipart.json",
|
|
|
|
**{ params: { file_name: "test.gif", file_size: 9_999_999, upload_type: "composer" } }
|
|
|
|
expect(response.status).to eq(422)
|
|
|
|
expect(response.body).to include(
|
|
|
|
I18n.t("upload.images.too_large_humanized", max_size: "1 MB"),
|
|
|
|
)
|
|
|
|
end
|
|
|
|
|
2022-03-06 21:39:33 -05:00
|
|
|
it "returns a sensible error if the file size is 0 bytes" do
|
|
|
|
SiteSetting.authorized_extensions = "*"
|
|
|
|
stub_create_multipart_request
|
|
|
|
|
|
|
|
post "/uploads/create-multipart.json",
|
|
|
|
**{ params: { file_name: "test.zip", file_size: 0, upload_type: "composer" } }
|
|
|
|
|
|
|
|
expect(response.status).to eq(422)
|
|
|
|
expect(response.body).to include(I18n.t("upload.size_zero_failure"))
|
|
|
|
end
|
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
def stub_create_multipart_request
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
FileStore::S3Store
|
|
|
|
.any_instance
|
|
|
|
.stubs(:temporary_upload_path)
|
|
|
|
.returns(
|
|
|
|
"uploads/default/#{test_bucket_prefix}/temp/28fccf8259bbe75b873a2bd2564b778c/test.png",
|
|
|
|
)
|
DEV: Correctly tag heredocs (#16061)
This allows text editors to use correct syntax coloring for the heredoc sections.
Heredoc tag names we use:
languages: SQL, JS, RUBY, LUA, HTML, CSS, SCSS, SH, HBS, XML, YAML/YML, MF, ICS
other: MD, TEXT/TXT, RAW, EMAIL
2022-02-28 14:50:55 -05:00
|
|
|
create_multipart_result = <<~XML
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n
|
|
|
|
<InitiateMultipartUploadResult>
|
|
|
|
<Bucket>s3-upload-bucket</Bucket>
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
<Key>uploads/default/#{test_bucket_prefix}/temp/28fccf8259bbe75b873a2bd2564b778c/test.png</Key>
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
<UploadId>#{mock_multipart_upload_id}</UploadId>
|
|
|
|
</InitiateMultipartUploadResult>
|
DEV: Correctly tag heredocs (#16061)
This allows text editors to use correct syntax coloring for the heredoc sections.
Heredoc tag names we use:
languages: SQL, JS, RUBY, LUA, HTML, CSS, SCSS, SH, HBS, XML, YAML/YML, MF, ICS
other: MD, TEXT/TXT, RAW, EMAIL
2022-02-28 14:50:55 -05:00
|
|
|
XML
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
stub_request(
|
|
|
|
:post,
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
"https://s3-upload-bucket.s3.us-west-1.amazonaws.com/uploads/default/#{test_bucket_prefix}/temp/28fccf8259bbe75b873a2bd2564b778c/test.png?uploads",
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
).to_return({ status: 200, body: create_multipart_result })
|
|
|
|
end
|
|
|
|
|
|
|
|
it "creates a multipart upload and creates an external upload stub that is marked as multipart" do
|
|
|
|
stub_create_multipart_request
|
2021-09-27 08:45:05 -04:00
|
|
|
post "/uploads/create-multipart.json",
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
**{ params: { file_name: "test.png", file_size: 1024, upload_type: "composer" } }
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
result = response.parsed_body
|
|
|
|
|
|
|
|
external_upload_stub =
|
|
|
|
ExternalUploadStub.where(
|
|
|
|
unique_identifier: result["unique_identifier"],
|
|
|
|
original_filename: "test.png",
|
|
|
|
created_by: user,
|
|
|
|
upload_type: "composer",
|
|
|
|
key: result["key"],
|
|
|
|
external_upload_identifier: mock_multipart_upload_id,
|
|
|
|
multipart: true,
|
|
|
|
filesize: 1024,
|
|
|
|
)
|
|
|
|
expect(external_upload_stub.exists?).to eq(true)
|
|
|
|
expect(result["key"]).to include(FileStore::S3Store::TEMPORARY_UPLOAD_PREFIX)
|
|
|
|
expect(result["external_upload_identifier"]).to eq(mock_multipart_upload_id)
|
|
|
|
expect(result["key"]).to eq(external_upload_stub.last.key)
|
|
|
|
end
|
|
|
|
|
2021-08-26 19:50:23 -04:00
|
|
|
it "includes accepted metadata when calling the store to create_multipart, but only allowed keys" do
|
|
|
|
stub_create_multipart_request
|
|
|
|
FileStore::S3Store
|
|
|
|
.any_instance
|
|
|
|
.expects(:create_multipart)
|
|
|
|
.with("test.png", "image/png", metadata: { "sha1-checksum" => "testing" })
|
|
|
|
.returns({ key: "test" })
|
2023-01-09 06:18:21 -05:00
|
|
|
|
2021-09-27 08:45:05 -04:00
|
|
|
post "/uploads/create-multipart.json",
|
|
|
|
**{
|
2021-08-26 19:50:23 -04:00
|
|
|
params: {
|
|
|
|
file_name: "test.png",
|
|
|
|
file_size: 1024,
|
|
|
|
upload_type: "composer",
|
|
|
|
metadata: {
|
|
|
|
"sha1-checksum" => "testing",
|
|
|
|
"blah" => "wontbeincluded",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
end
|
|
|
|
|
2023-06-15 22:44:35 -04:00
|
|
|
describe "rate limiting" do
  before { RateLimiter.enable }

  use_redis_snapshotting

  it "rate limits" do
    # Allow exactly one create-multipart call per minute, so the second
    # identical request must be rejected with 429 Too Many Requests.
    SiteSetting.max_create_multipart_per_minute = 1

    stub_create_multipart_request

    # Both attempts send the very same request; keep it in one place.
    attempt_create_multipart = -> do
      post "/uploads/create-multipart.json",
           params: {
             file_name: "test.png",
             upload_type: "composer",
             file_size: 1024,
           }
    end

    attempt_create_multipart.call
    expect(response.status).to eq(200)

    attempt_create_multipart.call
    expect(response.status).to eq(429)
  end
end
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the store is not external" do
  before { sign_in(user) }

  it "returns 404" do
    # Multipart endpoints only exist for direct S3 uploads; with the default
    # local store the route responds with not-found.
    post "/uploads/create-multipart.json",
         params: { file_name: "test.png", upload_type: "composer", file_size: 1024 }
    expect(response.status).to eq(404)
  end
end
|
|
|
|
end
|
|
|
|
|
|
|
|
describe "#batch_presign_multipart_parts" do
|
|
|
|
# Canned S3 multipart "UploadId" in the same opaque format AWS returns from
# CreateMultipartUpload; reused by the stubbed requests and URL assertions.
fab!(:mock_multipart_upload_id) do
  "ibZBv_75gd9r8lH_gqXatLdxMVpAlj6CFTR.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--"
end
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
# A multipart ExternalUploadStub owned by the signed-in user and tied to the
# mocked S3 upload ID above, so ownership/404 cases can be exercised.
fab!(:external_upload_stub) do
  Fabricate(
    :image_external_upload_stub,
    created_by: user,
    multipart: true,
    external_upload_identifier: mock_multipart_upload_id,
  )
end
|
|
|
|
|
|
|
|
context "when the store is external" do
|
|
|
|
# Batch-presign requires the external (S3) store: sign in, switch on direct
# S3 uploads, and configure the stubbed S3 credentials/bucket (setup_s3).
before do
  sign_in(user)
  SiteSetting.enable_direct_s3_uploads = true
  setup_s3
end
|
|
|
|
|
|
|
|
# Stubs the HTTP GET issued by the store's list_multipart_parts (the S3
# ListParts API) so specs can exercise batch-presign without touching AWS.
# The canned XML advertises a single uploaded part (ETag "test", part 1).
def stub_list_multipart_request
  list_multipart_result = <<~XML
  <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n
  <ListPartsResult>
     <Bucket>s3-upload-bucket</Bucket>
     <Key>#{external_upload_stub.key}</Key>
     <UploadId>#{mock_multipart_upload_id}</UploadId>
     <PartNumberMarker>0</PartNumberMarker>
     <NextPartNumberMarker>0</NextPartNumberMarker>
     <MaxParts>1</MaxParts>
     <IsTruncated>false</IsTruncated>
     <Part>
        <ETag>test</ETag>
        <LastModified>#{Time.zone.now}</LastModified>
        <PartNumber>1</PartNumber>
        <Size>#{5.megabytes}</Size>
     </Part>
     <Initiator>
        <DisplayName>test-upload-user</DisplayName>
        <ID>arn:aws:iam::123:user/test-upload-user</ID>
     </Initiator>
     <Owner>
        <DisplayName></DisplayName>
        <ID>12345</ID>
     </Owner>
     <StorageClass>STANDARD</StorageClass>
  </ListPartsResult>
  XML
  # Stub the exact URL the store requests: max-parts=1 mirrors MaxParts in the
  # XML above, and the host flips to the transfer-acceleration endpoint when
  # enable_s3_transfer_acceleration is on (exercised in a context below).
  stub_request(
    :get,
    "https://s3-upload-bucket.#{SiteSetting.enable_s3_transfer_acceleration ? "s3-accelerate" : "s3.us-west-1"}.amazonaws.com/#{external_upload_stub.key}?max-parts=1&uploadId=#{mock_multipart_upload_id}",
  ).to_return({ status: 200, body: list_multipart_result })
end
|
|
|
|
|
|
|
|
# Missing unique_identifier / part_numbers are rejected before any S3 work.
it "errors if the correct params are not provided" do
  post "/uploads/batch-presign-multipart-parts.json", params: {}
  expect(response.status).to eq(400)
end
|
|
|
|
|
|
|
|
it "errors if the part_numbers do not contain numbers between 1 and 10000" do
  # Both out-of-range integers and non-numeric entries are invalid; the
  # endpoint must reject each request with the same 400 and message.
  [[-1, 0, 1, 2, 3, 4], [3, 4, "blah"]].each do |invalid_part_numbers|
    post "/uploads/batch-presign-multipart-parts.json",
         params: {
           unique_identifier: external_upload_stub.unique_identifier,
           part_numbers: invalid_part_numbers,
         }
    expect(response.status).to eq(400)
    expect(response.body).to include(
      "You supplied invalid parameters to the request: Each part number should be between 1 and 10000",
    )
  end
end
|
|
|
|
|
|
|
|
it "returns 404 when the upload stub does not exist" do
  # An unknown unique_identifier yields not-found rather than an error that
  # would reveal anything about existing stubs.
  post "/uploads/batch-presign-multipart-parts.json",
       params: { unique_identifier: "unknown", part_numbers: [1, 2, 3] }
  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "returns 404 when the upload stub does not belong to the user" do
  # Hand the stub to a different user; the signed-in user is denied with the
  # same 404 as a missing stub, so ownership is not leaked.
  external_upload_stub.update!(created_by: Fabricate(:user))
  post "/uploads/batch-presign-multipart-parts.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
         part_numbers: [1, 2, 3],
       }
  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "returns 404 when the multipart upload does not exist" do
  # Simulate S3 reporting the upload ID is gone (e.g. already aborted or
  # completed) when the controller verifies it via list_multipart_parts.
  FileStore::S3Store.any_instance.stubs(:list_multipart_parts).raises(
    Aws::S3::Errors::NoSuchUpload.new("test", "test"),
  )
  post "/uploads/batch-presign-multipart-parts.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
         part_numbers: [1, 2, 3],
       }
  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "returns an object with the presigned URLs with the part numbers as keys" do
  stub_list_multipart_request
  post "/uploads/batch-presign-multipart-parts.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
         part_numbers: [2, 3, 4],
       }

  expect(response.status).to eq(200)
  result = response.parsed_body
  # JSON object keys are strings; each URL must carry its own partNumber
  # together with the multipart upload ID.
  expect(result["presigned_urls"].keys).to eq(%w[2 3 4])
  %w[2 3 4].each do |part_number|
    expect(result["presigned_urls"][part_number]).to include(
      "?partNumber=#{part_number}&uploadId=#{mock_multipart_upload_id}",
    )
  end
end
|
|
|
|
|
DEV: Introduce S3 transfer acceleration for uploads behind hidden setting (#24238)
This commit adds an `enable_s3_transfer_acceleration` site setting,
which is hidden to begin with. We are adding this because in certain
regions, using https://aws.amazon.com/s3/transfer-acceleration/ can
drastically speed up uploads, sometimes as much as 70% in certain
regions depending on the target bucket region. This is important for
us because we have direct S3 multipart uploads enabled everywhere
on our hosting.
To start, we only want this on the uploads bucket, not the backup one.
Also, this will accelerate both uploads **and** downloads, depending
on whether a presigned URL is used for downloading. This is the case
when secure uploads is enabled, not anywhere else at this time. To
enable the S3 acceleration on downloads more generally would be a
more in-depth change, since we currently store S3 Upload record URLs
like this:
```
url: "//test.s3.dualstack.us-east-2.amazonaws.com/original/2X/6/123456.png"
```
For acceleration, `s3.dualstack` would need to be changed to `s3-accelerate.dualstack`
here.
Note that for this to have any effect, Transfer Acceleration must be enabled
on the S3 bucket used for uploads per https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration-examples.html.
2023-11-06 20:50:40 -05:00
|
|
|
context "when enable_s3_transfer_acceleration is true" do
  before { SiteSetting.enable_s3_transfer_acceleration = true }

  it "uses the s3-accelerate endpoint for presigned URLs" do
    # stub_list_multipart_request builds its stubbed URL from the same site
    # setting, so it already expects the accelerated hostname here.
    stub_list_multipart_request
    post "/uploads/batch-presign-multipart-parts.json",
         params: {
           unique_identifier: external_upload_stub.unique_identifier,
           part_numbers: [2, 3, 4],
         }

    expect(response.status).to eq(200)
    result = response.parsed_body
    expect(result["presigned_urls"].keys).to eq(%w[2 3 4])
    # Presigned host must be the transfer-acceleration endpoint.
    expect(result["presigned_urls"]["2"]).to include("s3-accelerate")
  end
end
|
|
|
|
|
2023-06-15 22:44:35 -04:00
|
|
|
describe "rate limiting" do
|
|
|
|
before { RateLimiter.enable }
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
|
2023-06-15 22:44:35 -04:00
|
|
|
use_redis_snapshotting
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
|
2023-06-15 22:44:35 -04:00
|
|
|
# Requests beyond the per-minute cap for batch part presigning are rejected.
it "rate limits" do
  SiteSetting.max_batch_presign_multipart_per_minute = 1

  stub_list_multipart_request

  presign_parts = -> do
    post "/uploads/batch-presign-multipart-parts.json",
         params: {
           unique_identifier: external_upload_stub.unique_identifier,
           part_numbers: [1, 2, 3],
         }
  end

  presign_parts.call
  expect(response.status).to eq(200)

  # A second identical request within the same minute exceeds the limit of 1.
  presign_parts.call
  expect(response.status).to eq(429)
end
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
context "when the store is not external" do
  before { sign_in(user) }

  # Without an external (S3) store configured the endpoint is not available.
  it "returns 404" do
    presign_params = {
      unique_identifier: external_upload_stub.unique_identifier,
      part_numbers: [1, 2, 3],
    }
    post "/uploads/batch-presign-multipart-parts.json", params: presign_params

    expect(response.status).to eq(404)
  end
end
|
|
|
|
end
|
|
|
|
|
|
|
|
describe "#complete_multipart" do
|
|
|
|
# Base S3 endpoint for the configured upload bucket; swaps in the
# transfer-acceleration host when that (hidden) site setting is enabled.
let(:upload_base_url) do
  s3_host =
    if SiteSetting.enable_s3_transfer_acceleration
      "s3-accelerate"
    else
      "s3.#{SiteSetting.s3_region}"
    end
  "https://#{SiteSetting.s3_upload_bucket}.#{s3_host}.amazonaws.com"
end
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
# A realistic-looking S3 multipart "upload ID" shared by the stubs below.
let(:mock_multipart_upload_id) do
  "ibZBv_75gd9r8lH_gqXatLdxMVpAlj6CFTR.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--"
end
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
# A pre-created multipart upload stub owned by the signed-in user, keyed to
# the mocked S3 upload ID so the stubbed S3 requests line up with it.
let!(:external_upload_stub) do
  Fabricate(
    :image_external_upload_stub,
    multipart: true,
    created_by: user,
    external_upload_identifier: mock_multipart_upload_id,
  )
end
|
|
|
|
|
|
|
|
context "when the store is external" do
|
|
|
|
# Sign the user in and enable direct-to-S3 uploads so the multipart
# endpoints are reachable; setup_s3 configures the stubbed S3 store.
before do
  sign_in(user)
  SiteSetting.enable_direct_s3_uploads = true
  setup_s3
end
|
|
|
|
|
|
|
|
# Stubs the S3 ListParts request (GET .../<key>?max-parts=1&uploadId=<id>)
# that the controller issues to check whether a multipart upload still
# exists. The canned XML response reports a single 5MB part with ETag
# "test" for the fabricated external_upload_stub.
def stub_list_multipart_request
  list_multipart_result = <<~XML
  <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n
  <ListPartsResult>
     <Bucket>s3-upload-bucket</Bucket>
     <Key>#{external_upload_stub.key}</Key>
     <UploadId>#{mock_multipart_upload_id}</UploadId>
     <PartNumberMarker>0</PartNumberMarker>
     <NextPartNumberMarker>0</NextPartNumberMarker>
     <MaxParts>1</MaxParts>
     <IsTruncated>false</IsTruncated>
     <Part>
       <ETag>test</ETag>
       <LastModified>#{Time.zone.now}</LastModified>
       <PartNumber>1</PartNumber>
       <Size>#{5.megabytes}</Size>
     </Part>
     <Initiator>
       <DisplayName>test-upload-user</DisplayName>
       <ID>arn:aws:iam::123:user/test-upload-user</ID>
     </Initiator>
     <Owner>
       <DisplayName></DisplayName>
       <ID>12345</ID>
     </Owner>
     <StorageClass>STANDARD</StorageClass>
  </ListPartsResult>
  XML
  stub_request(
    :get,
    "#{upload_base_url}/#{external_upload_stub.key}?max-parts=1&uploadId=#{mock_multipart_upload_id}",
  ).to_return({ status: 200, body: list_multipart_result })
end
|
|
|
|
|
|
|
|
# Missing unique_identifier/parts is rejected before any S3 interaction.
it "errors if the correct params are not provided" do
  post "/uploads/complete-multipart.json", params: {}

  expect(response.status).to eq(400)
end
|
|
|
|
|
|
|
|
# S3 only allows part numbers 1..10000; negative, oversized, and
# non-numeric part numbers must all be rejected with the same message.
it "errors if the part_numbers do not contain numbers between 1 and 10000" do
  stub_list_multipart_request

  [-1, 20_001, "blah"].each do |invalid_part_number|
    post "/uploads/complete-multipart.json",
         params: {
           unique_identifier: external_upload_stub.unique_identifier,
           parts: [{ part_number: invalid_part_number, etag: "test1" }],
         }

    expect(response.status).to eq(400)
    expect(response.body).to include(
      "You supplied invalid parameters to the request: Each part number should be between 1 and 10000",
    )
  end
end
|
|
|
|
|
|
|
|
# Every part must carry an etag alongside its part_number.
it "errors if any of the parts objects have missing values" do
  stub_list_multipart_request

  # The etag is deliberately omitted from this part.
  complete_params = {
    unique_identifier: external_upload_stub.unique_identifier,
    parts: [{ part_number: 1 }],
  }
  post "/uploads/complete-multipart.json", params: complete_params

  expect(response.status).to eq(400)
  expect(response.body).to include("All parts must have an etag")
end
|
|
|
|
|
|
|
|
# Completing with an unknown unique_identifier responds 404.
it "returns 404 when the upload stub does not exist" do
  complete_params = {
    unique_identifier: "unknown",
    parts: [{ part_number: 1, etag: "test1" }],
  }
  post "/uploads/complete-multipart.json", params: complete_params

  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
# If S3's CompleteMultipartUpload call raises, the controller returns 422.
it "returns 422 when the complete request errors" do
  FileStore::S3Store.any_instance.stubs(:complete_multipart).raises(
    Aws::S3::Errors::ServiceError.new({}, "test"),
  )
  stub_list_multipart_request

  post "/uploads/complete-multipart.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
         parts: [{ part_number: 1, etag: "test1" }],
       }

  expect(response.status).to eq(422)
end
|
|
|
|
|
|
|
|
# Only the user who created the stub may complete it; everyone else gets 404.
it "returns 404 when the upload stub does not belong to the user" do
  external_upload_stub.update!(created_by: Fabricate(:user))

  complete_params = {
    unique_identifier: external_upload_stub.unique_identifier,
    parts: [{ part_number: 1, etag: "test1" }],
  }
  post "/uploads/complete-multipart.json", params: complete_params

  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
# If S3 reports NoSuchUpload while listing parts, the controller responds 404.
it "returns 404 when the multipart upload does not exist" do
  FileStore::S3Store.any_instance.stubs(:list_multipart_parts).raises(
    Aws::S3::Errors::NoSuchUpload.new("test", "test"),
  )

  post "/uploads/complete-multipart.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
         parts: [{ part_number: 1, etag: "test1" }],
       }

  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "completes the multipart upload, creates the Upload record, and returns a serialized Upload record" do
|
|
|
|
temp_location = "#{upload_base_url}/#{external_upload_stub.key}"
|
|
|
|
stub_list_multipart_request
|
|
|
|
stub_request(
|
|
|
|
:post,
|
|
|
|
"#{temp_location}?uploadId=#{external_upload_stub.external_upload_identifier}",
|
|
|
|
).with(
|
2021-10-20 18:04:08 -04:00
|
|
|
body:
|
|
|
|
"<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Part><ETag>test1</ETag><PartNumber>1</PartNumber></Part><Part><ETag>test2</ETag><PartNumber>2</PartNumber></Part></CompleteMultipartUpload>",
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
).to_return(status: 200, body: <<~XML)
|
|
|
|
<?xml version="1.0" encoding="UTF-8"?>
|
|
|
|
<CompleteMultipartUploadResult>
|
|
|
|
<Location>#{temp_location}</Location>
|
|
|
|
<Bucket>s3-upload-bucket</Bucket>
|
|
|
|
<Key>#{external_upload_stub.key}</Key>
|
|
|
|
<ETag>testfinal</ETag>
|
|
|
|
</CompleteMultipartUploadResult>
|
|
|
|
XML
|
|
|
|
|
|
|
|
# all the functionality for ExternalUploadManager is already tested along
|
|
|
|
# with stubs to S3 in its own test, we can just stub the response here
|
|
|
|
upload = Fabricate(:upload)
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
ExternalUploadManager.any_instance.stubs(:transform!).returns(upload)
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary S3 upload into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
|
|
|
|
post "/uploads/complete-multipart.json",
|
|
|
|
params: {
|
|
|
|
unique_identifier: external_upload_stub.unique_identifier,
|
|
|
|
parts: [{ part_number: 1, etag: "test1" }, { part_number: 2, etag: "test2" }],
|
|
|
|
}
|
|
|
|
|
|
|
|
expect(response.status).to eq(200)
|
|
|
|
result = response.parsed_body
|
|
|
|
expect(result[:upload]).to eq(JSON.parse(UploadSerializer.new(upload).to_json)[:upload])
|
|
|
|
end
|
|
|
|
|
2023-06-15 22:44:35 -04:00
|
|
|
describe "rate limiting" do
  before { RateLimiter.enable }

  use_redis_snapshotting

  it "rate limits" do
    # Only one complete-multipart call per minute is allowed; the second
    # identical request must therefore be rejected with 429.
    SiteSetting.max_complete_multipart_per_minute = 1

    2.times do
      post "/uploads/complete-multipart.json",
           params: {
             unique_identifier: "blah",
             parts: [{ part_number: 1, etag: "test1" }, { part_number: 2, etag: "test2" }],
           }
    end

    expect(response.status).to eq(429)
  end
end
|
|
|
|
|
|
|
|
context "when the store is not external" do
  before { sign_in(user) }

  it "returns 404" do
    # With direct S3 uploads disabled the endpoint 404s up front, before any
    # stub lookup, so the (mismatched) identifier value is never consulted.
    request_params = {
      unique_identifier: external_upload_stub.external_upload_identifier,
      parts: [{ part_number: 1, etag: "test1" }, { part_number: 2, etag: "test2" }],
    }

    post "/uploads/complete-multipart.json", params: request_params

    expect(response.status).to eq(404)
  end
end
|
|
|
|
|
|
|
|
describe "#abort_multipart" do
  let(:upload_base_url) do
    "https://#{SiteSetting.s3_upload_bucket}.#{SiteSetting.enable_s3_transfer_acceleration ? "s3-accelerate" : "s3.#{SiteSetting.s3_region}"}.amazonaws.com"
  end

  # A realistic S3 multipart "upload ID", as returned by CreateMultipartUpload.
  let(:mock_multipart_upload_id) do
    "ibZBv_75gd9r8lH_gqXatLdxMVpAlj6CFTR.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--"
  end

  let!(:external_upload_stub) do
    Fabricate(
      :image_external_upload_stub,
      created_by: user,
      multipart: true,
      external_upload_identifier: mock_multipart_upload_id,
    )
  end

  context "when the store is external" do
    before do
      sign_in(user)
      SiteSetting.enable_direct_s3_uploads = true
      setup_s3
    end

    # Stubs the S3 AbortMultipartUpload call (a DELETE against the temp key
    # with the uploadId query param) so no real network request is made.
    def stub_abort_request
      temp_location = "#{upload_base_url}/#{external_upload_stub.key}"
      stub_request(
        :delete,
        "#{temp_location}?uploadId=#{external_upload_stub.external_upload_identifier}",
      ).to_return(status: 200, body: "")
    end

    it "errors if the correct params are not provided" do
      post "/uploads/abort-multipart.json", params: {}
      expect(response.status).to eq(400)
    end

    it "returns 200 when the stub does not exist, assumes it has already been deleted" do
      FileStore::S3Store.any_instance.expects(:abort_multipart).never
      post "/uploads/abort-multipart.json", params: { external_upload_identifier: "unknown" }
      expect(response.status).to eq(200)
    end

    it "returns 404 when the upload stub does not belong to the user" do
      external_upload_stub.update!(created_by: Fabricate(:user))
      post "/uploads/abort-multipart.json",
           params: {
             external_upload_identifier: external_upload_stub.external_upload_identifier,
           }
      expect(response.status).to eq(404)
    end

    it "aborts the multipart upload and deletes the stub" do
      stub_abort_request

      post "/uploads/abort-multipart.json",
           params: {
             external_upload_identifier: external_upload_stub.external_upload_identifier,
           }

      expect(response.status).to eq(200)
      expect(ExternalUploadStub.exists?(id: external_upload_stub.id)).to eq(false)
    end

    it "returns 422 when the abort request errors" do
      FileStore::S3Store
        .any_instance
        .stubs(:abort_multipart)
        .raises(Aws::S3::Errors::ServiceError.new({}, "test"))
      post "/uploads/abort-multipart.json",
           params: {
             external_upload_identifier: external_upload_stub.external_upload_identifier,
           }
      expect(response.status).to eq(422)
    end
  end

  context "when the store is not external" do
    before { sign_in(user) }

    it "returns 404" do
      # FIX: this example previously POSTed to /uploads/complete-multipart.json
      # (with complete-multipart params) — a copy-paste from the
      # #complete_multipart specs — so it never exercised the action under
      # test. All multipart endpoints share the same external-store guard, so
      # abort-multipart also 404s when direct S3 uploads are disabled.
      post "/uploads/abort-multipart.json",
           params: {
             external_upload_identifier: external_upload_stub.external_upload_identifier,
           }
      expect(response.status).to eq(404)
    end
  end
end
|
|
|
|
|
|
|
|
describe "#complete_external_upload" do
|
|
|
|
before { sign_in(user) }
|
|
|
|
|
|
|
|
context "when the store is external" do
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
fab!(:external_upload_stub) { Fabricate(:image_external_upload_stub, created_by: user) }
|
2021-07-27 18:42:25 -04:00
|
|
|
let(:upload) { Fabricate(:upload) }
|
|
|
|
|
|
|
|
before do
|
|
|
|
SiteSetting.enable_direct_s3_uploads = true
|
|
|
|
SiteSetting.enable_upload_debug_mode = true
|
|
|
|
setup_s3
|
|
|
|
end
|
|
|
|
|
|
|
|
it "returns 404 when the upload stub does not exist" do
  # An identifier with no matching ExternalUploadStub must be rejected.
  unknown_identifier = "unknown"
  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: unknown_identifier,
       }
  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "returns 404 when the upload stub does not belong to the user" do
  # Reassign the stub to a different user; the signed-in user must then be
  # denied access (404, not 403, to avoid leaking the stub's existence).
  another_user = Fabricate(:user)
  external_upload_stub.update!(created_by: another_user)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(404)
end
|
|
|
|
|
|
|
|
it "handles ChecksumMismatchError" do
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
ExternalUploadManager
|
|
|
|
.any_instance
|
|
|
|
.stubs(:transform!)
|
|
|
|
.raises(ExternalUploadManager::ChecksumMismatchError)
|
2021-07-27 18:42:25 -04:00
|
|
|
post "/uploads/complete-external-upload.json",
|
|
|
|
params: {
|
|
|
|
unique_identifier: external_upload_stub.unique_identifier,
|
|
|
|
}
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
expect(response.status).to eq(422)
|
2023-08-22 21:18:33 -04:00
|
|
|
expect(response.parsed_body["errors"].first).to eq(
|
|
|
|
I18n.t("upload.checksum_mismatch_failure"),
|
|
|
|
)
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
# When the file stored in S3 does not match the size recorded on the
# ExternalUploadStub, the controller must respond 422 with the
# size-mismatch translation (including the detail from the error).
it "handles SizeMismatchError" do
  ExternalUploadManager
    .any_instance
    .stubs(:transform!)
    .raises(ExternalUploadManager::SizeMismatchError.new("expected: 10, actual: 1000"))

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"].first).to eq(
    I18n.t("upload.size_mismatch_failure", additional_detail: "expected: 10, actual: 1000"),
  )
end
|
|
|
|
|
|
|
|
# When the temporary S3 upload cannot be promoted to a full Upload
# record, the controller must respond 422 with the cannot-promote message.
it "handles CannotPromoteError" do
  ExternalUploadManager
    .any_instance
    .stubs(:transform!)
    .raises(ExternalUploadManager::CannotPromoteError)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"].first).to eq(I18n.t("upload.cannot_promote_failure"))
end
|
|
|
|
|
|
|
|
# Both a failed download from S3 and a missing S3 object must surface
# the same download-failure error to the client with a 422 status.
it "handles DownloadFailedError and Aws::S3::Errors::NotFound" do
  ExternalUploadManager
    .any_instance
    .stubs(:transform!)
    .raises(ExternalUploadManager::DownloadFailedError)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"].first).to eq(I18n.t("upload.download_failure"))

  # Re-stub to raise the AWS "not found" error; the response is identical.
  ExternalUploadManager
    .any_instance
    .stubs(:transform!)
    .raises(Aws::S3::Errors::NotFound.new("error", "not found"))

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"].first).to eq(I18n.t("upload.download_failure"))
end
|
|
|
|
|
|
|
|
# Any unexpected error during promotion is mapped to the generic
# upload-failed message rather than leaking the exception to the client.
it "handles a generic upload failure" do
  ExternalUploadManager.any_instance.stubs(:transform!).raises(StandardError)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"].first).to eq(I18n.t("upload.failed"))
end
|
|
|
|
|
|
|
|
# If the resulting Upload record carries validation errors, those
# errors are returned verbatim to the client with a 422 status.
it "handles validation errors on the upload" do
  upload.errors.add(:base, "test error")
  ExternalUploadManager.any_instance.stubs(:transform!).returns(upload)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(response.status).to eq(422)
  expect(response.parsed_body["errors"]).to eq(["test error"])
end
|
|
|
|
|
|
|
|
# On success the temporary ExternalUploadStub must be destroyed and the
# response must be the serialized Upload, with a 200 status.
it "deletes the stub and returns the serialized upload when complete" do
  ExternalUploadManager.any_instance.stubs(:transform!).returns(upload)

  post "/uploads/complete-external-upload.json",
       params: {
         unique_identifier: external_upload_stub.unique_identifier,
       }

  expect(ExternalUploadStub.exists?(id: external_upload_stub.id)).to eq(false)
  expect(response.status).to eq(200)
  expect(response.parsed_body).to eq(UploadsController.serialize_upload(upload))
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Presigned-PUT generation only exists for external (S3) stores; with a
# local store the route must 404.
context "when the store is not external" do
  it "returns 404" do
    post "/uploads/generate-presigned-put.json",
         params: {
           file_name: "test.png",
           type: "card_background",
         }
    expect(response.status).to eq(404)
  end
end
|
|
|
|
end
|
2013-04-02 19:17:17 -04:00
|
|
|
end
|