2019-05-02 18:17:27 -04:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2016-06-30 10:55:01 -04:00
|
|
|
require "uri"
|
2017-06-13 07:27:05 -04:00
|
|
|
require "mini_mime"
|
2022-03-21 10:28:52 -04:00
|
|
|
require "file_store/base_store"
|
|
|
|
require "s3_helper"
|
|
|
|
require "file_helper"
|
2013-07-31 17:26:34 -04:00
|
|
|
|
2013-11-05 13:04:47 -05:00
|
|
|
module FileStore
|
|
|
|
class S3Store < BaseStore
|
2015-05-25 11:59:00 -04:00
|
|
|
# Key prefix under which removed objects are parked before final expiry
# (see #remove_file and #purge_tombstone). `||=` only assigns when the
# constant is not already set — presumably to avoid "already initialized
# constant" warnings on code reload; verify against app reload behavior.
TOMBSTONE_PREFIX ||= "tombstone/"
|
|
|
|
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
# The multipart-upload operations are implemented in S3Helper; expose them
# directly on the store so callers do not have to reach into the helper.
delegate :abort_multipart,
         :presign_multipart_part,
         :list_multipart_parts,
         :complete_multipart,
         to: :s3_helper
|
|
|
|
|
2016-08-19 02:08:04 -04:00
|
|
|
# @param s3_helper [S3Helper, nil] optionally inject a preconfigured helper;
#   when nil, a default one is lazily built by #s3_helper.
def initialize(s3_helper = nil)
  @s3_helper = s3_helper
end
|
|
|
|
|
|
|
|
# Lazily builds (and memoizes) the S3Helper for the configured bucket.
# In multisite mode each site gets its own tombstone prefix so removed
# files from different sites do not collide.
def s3_helper
  @s3_helper ||=
    S3Helper.new(
      s3_bucket,
      Rails.configuration.multisite ? multisite_tombstone_prefix : TOMBSTONE_PREFIX,
    )
end
|
2013-07-31 17:26:34 -04:00
|
|
|
|
2015-05-29 12:39:47 -04:00
|
|
|
# Uploads +file+ to S3 at the path derived from +upload+, caching a local
# copy, and records the resulting etag on the upload record.
#
# @return [String] the absolute URL of the stored object
def store_upload(file, upload, content_type = nil)
  # Clear any stale URL before computing the destination path.
  upload.url = nil
  destination = get_path_for_upload(upload)

  store_opts = {
    filename: upload.original_filename,
    content_type: content_type,
    cache_locally: true,
    private_acl: upload.secure?,
  }

  url, etag = store_file(file, destination, store_opts)
  upload.etag = etag
  url
end
|
2013-07-31 17:26:34 -04:00
|
|
|
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
# Moves an object that was already uploaded directly to S3 (under
# +existing_external_upload_key+) to its final destination path for
# +upload+, recording the resulting etag on the upload record.
#
# @return [String] the absolute URL of the stored object
def move_existing_stored_upload(existing_external_upload_key:, upload: nil, content_type: nil)
  upload.url = nil
  destination = get_path_for_upload(upload)

  # No local file is involved; store_file performs an S3-side copy when
  # move_existing is set.
  store_opts = {
    filename: upload.original_filename,
    content_type: content_type,
    cache_locally: false,
    private_acl: upload.secure?,
    move_existing: true,
    existing_external_upload_key: existing_external_upload_key,
  }

  url, etag = store_file(nil, destination, store_opts)
  upload.etag = etag
  url
end
|
|
|
|
|
2019-11-17 20:25:42 -05:00
|
|
|
# Uploads an optimized image to S3, recording the resulting etag on the
# optimized_image record.
#
# @return [String] the absolute URL of the stored object
def store_optimized_image(file, optimized_image, content_type = nil, secure: false)
  optimized_image.url = nil
  destination = get_path_for_optimized_image(optimized_image)

  url, etag = store_file(file, destination, content_type: content_type, private_acl: secure)
  optimized_image.etag = etag
  url
end
|
|
|
|
|
2021-07-27 18:42:25 -04:00
|
|
|
# File is an actual Tempfile on disk
|
|
|
|
#
|
|
|
|
# An existing_external_upload_key is given for cases where move_existing is specified.
|
|
|
|
# This is an object already uploaded directly to S3 that we are now moving
|
|
|
|
# to its final resting place with the correct sha and key.
|
|
|
|
#
|
2015-05-29 12:39:47 -04:00
|
|
|
# options
|
|
|
|
# - filename
|
|
|
|
# - content_type
|
|
|
|
# - cache_locally
|
2021-07-27 18:42:25 -04:00
|
|
|
# - move_existing
|
|
|
|
# - existing_external_upload_key
|
2015-05-29 12:39:47 -04:00
|
|
|
# Core storage routine: either uploads +file+ to S3 at +path+, or (when
# opts[:move_existing] is set) copies an object already in the bucket from
# opts[:existing_external_upload_key] to +path+ and deletes the original.
#
# @return [Array(String, String)] the absolute URL and the S3 etag
def store_file(file, path, opts = {})
  # dup because we mutate path below (prepend) without affecting the caller
  path = path.dup

  filename = opts[:filename].presence || File.basename(path)

  # cache file locally when needed
  cache_file(file, File.basename(path)) if opts[:cache_locally]

  options = {
    # secure uploads are stored private; everything else is world-readable
    acl: opts[:private_acl] ? "private" : "public-read",
    cache_control: "max-age=31556952, public, immutable",
    # fall back to guessing the content type from the filename extension
    content_type:
      opts[:content_type].presence || MiniMime.lookup_by_filename(filename)&.content_type,
  }

  # add a "content disposition: attachment" header with the original
  # filename for everything but safe images (not SVG). audio and video will
  # still stream correctly in HTML players, and when a direct link is
  # provided to any file but an image it will download correctly in the
  # browser.
  if !FileHelper.is_inline_image?(filename)
    options[:content_disposition] = ActionDispatch::Http::ContentDisposition.format(
      disposition: "attachment",
      filename: filename,
    )
  end

  # in multisite mode every site's objects live under its own upload_path
  path.prepend(File.join(upload_path, "/")) if Rails.configuration.multisite

  # if this fails, it will throw an exception
  if opts[:move_existing] && opts[:existing_external_upload_key]
    original_path = opts[:existing_external_upload_key]
    # keep the metadata set at direct-upload time on the copied object
    options[:apply_metadata_to_destination] = true
    path, etag = s3_helper.copy(original_path, path, options: options)
    # the temporary direct-upload object is no longer needed after the copy
    delete_file(original_path)
  else
    path, etag = s3_helper.upload(file, path, options)
  end

  # return the upload url and etag
  [File.join(absolute_base_url, path), etag]
end
|
2013-07-31 17:26:34 -04:00
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
# Permanently deletes the object at +path+ from the bucket.
def delete_file(path)
  # delete the object outright without moving to tombstone,
  # not recommended for most use cases
  s3_helper.delete_object(path)
end
|
|
|
|
|
2016-08-14 23:21:24 -04:00
|
|
|
# Removes the object at +path+, but only when +url+ actually belongs to
# this store (guards against deleting foreign objects).
def remove_file(url, path)
  if has_been_uploaded?(url)
    # copy the removed file to tombstone (the `true` flag) so it can be
    # recovered until the tombstone grace period expires
    s3_helper.remove(path, true)
  end
end
|
2013-08-13 16:08:29 -04:00
|
|
|
|
2018-08-07 23:26:05 -04:00
|
|
|
# Copies an object from +source+ to +destination+ inside the bucket, but
# only when +url+ actually belongs to this store.
def copy_file(url, source, destination)
  s3_helper.copy(source, destination) if has_been_uploaded?(url)
end
|
|
|
|
|
2013-11-05 13:04:47 -05:00
|
|
|
# Whether +url+ points at an object stored by this S3 store — either on
# the bucket's own hostname (optionally under the configured bucket
# folder) or behind the configured S3 CDN.
#
# @param url [String, nil]
# @return [Boolean]
def has_been_uploaded?(url)
  return false if url.blank?

  begin
    parsed_url = URI.parse(UrlHelper.encode(url))
  rescue StandardError
    # There are many exceptions possible here including Addressable::URI:: exceptions
    # and URI:: exceptions, catch all may seem wide, but it makes no sense to raise ever
    # on an invalid url here
    return false
  end

  base_hostname = URI.parse(absolute_base_url).hostname
  if url[base_hostname]
    # if the hostnames match it means the upload is in the same
    # bucket on s3. however, the bucket folder path may differ in
    # some cases, and we do not want to assume the url is uploaded
    # here. e.g. the path of the current site could be /prod and the
    # other site could be /staging
    if s3_bucket_folder_path.present?
      return parsed_url.path.starts_with?("/#{s3_bucket_folder_path}")
    else
      return true
    end
    # NOTE: a trailing `return false` here was unreachable (both branches
    # above return) and has been removed.
  end

  return false if SiteSetting.Upload.s3_cdn_url.blank?

  s3_cdn_url = URI.parse(SiteSetting.Upload.s3_cdn_url || "")
  cdn_hostname = s3_cdn_url.hostname

  if cdn_hostname.presence && url[cdn_hostname] &&
       (s3_cdn_url.path.blank? || parsed_url.path.starts_with?(s3_cdn_url.path))
    return true
  end
  false
end
|
|
|
|
|
|
|
|
# The folder portion of the configured bucket setting ("bucket/folder" →
# "folder"); presumably nil when no folder is configured — verify against
# S3Helper.get_bucket_and_folder_path.
def s3_bucket_folder_path
  S3Helper.get_bucket_and_folder_path(s3_bucket)[1]
end
|
2013-08-13 16:08:29 -04:00
|
|
|
|
2017-10-06 01:20:01 -04:00
|
|
|
# The bare bucket name from the configured bucket setting, without any
# folder path suffix.
def s3_bucket_name
  S3Helper.get_bucket_and_folder_path(s3_bucket)[0]
end
|
|
|
|
|
2013-11-05 13:04:47 -05:00
|
|
|
# Memoized absolute base URL of the upload bucket, from site settings.
def absolute_base_url
  @absolute_base_url ||= SiteSetting.Upload.absolute_base_url
end
|
2013-08-13 16:08:29 -04:00
|
|
|
|
2019-11-17 20:25:42 -05:00
|
|
|
# The host clients should upload to: the configured S3 CDN URL when one is
# set, otherwise the bucket's base URL with an https scheme prepended.
def s3_upload_host
  cdn = SiteSetting.Upload.s3_cdn_url
  cdn.present? ? cdn : "https:#{absolute_base_url}"
end
|
|
|
|
|
2013-11-05 13:04:47 -05:00
|
|
|
# S3 is an external (non-local-filesystem) store.
def external?
  true
end
|
2013-08-13 16:08:29 -04:00
|
|
|
|
2013-11-27 16:01:41 -05:00
|
|
|
# Updates the bucket's tombstone lifecycle rule so tombstoned objects
# expire after the given grace period (interpretation of the value is
# handled by S3Helper).
def purge_tombstone(grace_period)
  s3_helper.update_tombstone_lifecycle(grace_period)
end
|
|
|
|
|
2018-12-19 00:32:32 -05:00
|
|
|
# Per-site tombstone prefix used in multisite mode, namespaced by the
# current database so sites cannot clobber each other's tombstones.
def multisite_tombstone_prefix
  current_db = RailsMultisite::ConnectionManagement.current_db
  File.join("uploads", "tombstone", current_db, "/")
end
|
|
|
|
|
2019-07-04 11:32:51 -04:00
|
|
|
# Short download URL for an upload (the ?dl=1 query string forces a
# download in the browser). Returns nil when no upload is given.
def download_url(upload)
  "#{upload.short_path}?dl=1" if upload
end
|
|
|
|
|
2015-05-25 23:08:31 -04:00
|
|
|
# Local filesystem path for an upload, but only when its URL is a relative
# site path (starts with a single "/"); S3-hosted uploads yield nil.
def path_for(upload)
  url = upload&.url
  return if url.nil? || url[%r{\A/[^/]}].nil?
  FileStore::LocalStore.new.path_for(upload)
end
|
|
|
|
|
2019-07-04 11:32:51 -04:00
|
|
|
# URL at which +upload+ can be fetched. Secure uploads (and forced
# downloads) go through a presigned GET URL; everything else uses the
# stored public URL directly.
def url_for(upload, force_download: false)
  return upload.url unless upload.secure? || force_download

  presigned_get_url(
    get_upload_key(upload),
    force_download: force_download,
    filename: upload.original_filename,
  )
end
|
|
|
|
|
2015-05-26 22:02:57 -04:00
|
|
|
# Rewrites a direct bucket URL into its S3 CDN equivalent. Returns the
# input unchanged when no CDN is configured.
def cdn_url(url)
  return url if SiteSetting.Upload.s3_cdn_url.blank?
  # capture the scheme prefix ("https:", "http:", or nil for
  # protocol-relative URLs) so the substitution matches exactly
  schema = url[%r{\A(https?:)?//}, 1]
  folder = s3_bucket_folder_path.nil? ? "" : "#{s3_bucket_folder_path}/"
  url.sub(
    File.join("#{schema}#{absolute_base_url}", folder),
    File.join(SiteSetting.Upload.s3_cdn_url, "/"),
  )
end
|
|
|
|
|
2022-05-25 19:53:01 -04:00
|
|
|
# Presigned GET URL for an arbitrary object path in the bucket.
#
# @param path [String] absolute object URL or key
# @param expires_in [Integer] signature lifetime in seconds
# @param force_download [Boolean] force a download content-disposition
def signed_url_for_path(
  path,
  expires_in: SiteSetting.s3_presigned_get_url_expires_after_seconds,
  force_download: false
)
  # Strip the bucket's absolute base URL (if present) to get the object key.
  object_key = path.sub("#{absolute_base_url}/", "")
  presigned_get_url(object_key, expires_in: expires_in, force_download: force_download)
end
|
|
|
|
|
|
|
|
# Presigned PUT URL that lets a client upload +file_name+ directly to a
# temporary path in the bucket.
#
# @param expires_in [Integer] signature lifetime in seconds
# @param metadata [Hash] S3 object metadata to bake into the signature
def signed_url_for_temporary_upload(
  file_name,
  expires_in: S3Helper::UPLOAD_URL_EXPIRES_AFTER_SECONDS,
  metadata: {}
)
  key = temporary_upload_path(file_name)
  s3_helper.presigned_url(
    key,
    method: :put_object,
    expires_in: expires_in,
    opts: {
      metadata: metadata,
      # direct uploads are always private until promoted to a real Upload
      acl: "private",
    },
  )
end
|
|
|
|
|
|
|
|
# Bucket key for a temporary direct upload of +file_name+, prefixed by the
# bucket folder (when configured) and this site's upload path.
def temporary_upload_path(file_name)
  bucket_folder = s3_bucket_folder_path
  prefix = bucket_folder.nil? ? upload_path : File.join(bucket_folder, upload_path)
  FileStore::BaseStore.temporary_upload_path(file_name, folder_prefix: prefix)
end
|
|
|
|
|
|
|
|
# Returns the S3 object handle for +path+ (delegates to S3Helper#object).
def object_from_path(path)
  s3_helper.object(path)
end
|
|
|
|
|
2015-05-29 12:39:47 -04:00
|
|
|
# Copies a cached avatar object to the user-specific avatar template path
# inside the bucket.
def cache_avatar(avatar, user_id)
  base_prefix = "#{absolute_base_url}/"
  source = avatar.url.sub(base_prefix, "")
  destination = avatar_template(avatar, user_id).sub(base_prefix, "")
  s3_helper.copy(source, destination)
end
|
2013-11-27 16:01:41 -05:00
|
|
|
|
2015-05-29 12:39:47 -04:00
|
|
|
# External avatar URL template for the given cached avatar and user.
def avatar_template(avatar, user_id)
  UserAvatar.external_avatar_url(user_id, avatar.upload_id, avatar.width)
end
|
2013-07-31 17:26:34 -04:00
|
|
|
|
2016-08-19 02:08:04 -04:00
|
|
|
# The configured upload bucket (downcased, possibly "bucket/folder").
#
# @raise [Discourse::SiteSettingMissing] when s3_upload_bucket is not set
def s3_bucket
  bucket = SiteSetting.Upload.s3_upload_bucket
  raise Discourse::SiteSettingMissing.new("s3_upload_bucket") if bucket.blank?
  bucket.downcase
end
|
2018-11-26 14:24:51 -05:00
|
|
|
|
2019-02-14 14:04:35 -05:00
|
|
|
def list_missing_uploads(skip_optimized: false)
|
2019-01-31 23:40:48 -05:00
|
|
|
if SiteSetting.enable_s3_inventory
|
|
|
|
require "s3_inventory"
|
2019-02-19 11:24:35 -05:00
|
|
|
S3Inventory.new(s3_helper, :upload).backfill_etags_and_list_missing
|
|
|
|
S3Inventory.new(s3_helper, :optimized).backfill_etags_and_list_missing unless skip_optimized
|
2019-01-31 23:40:48 -05:00
|
|
|
else
|
2019-03-13 05:39:07 -04:00
|
|
|
list_missing(Upload.by_users, "original/")
|
2019-01-31 23:40:48 -05:00
|
|
|
list_missing(OptimizedImage, "optimized/") unless skip_optimized
|
|
|
|
end
|
2018-11-26 14:24:51 -05:00
|
|
|
end
|
|
|
|
|
2022-04-12 00:26:42 -04:00
|
|
|
def update_upload_ACL(upload, optimized_images_preloaded: false)
|
2019-06-05 23:27:24 -04:00
|
|
|
key = get_upload_key(upload)
|
2019-11-17 20:25:42 -05:00
|
|
|
update_ACL(key, upload.secure?)
|
2019-06-05 23:27:24 -04:00
|
|
|
|
2022-04-12 00:26:42 -04:00
|
|
|
# if we do find_each when the images have already been preloaded with
|
|
|
|
# includes(:optimized_images), then the optimized_images are fetched
|
|
|
|
# from the database again, negating the preloading if this operation
|
|
|
|
# is done on a large amount of uploads at once (see Jobs::SyncAclsForUploads)
|
|
|
|
if optimized_images_preloaded
|
|
|
|
upload.optimized_images.each do |optimized_image|
|
|
|
|
optimized_image_key = get_path_for_optimized_image(optimized_image)
|
|
|
|
update_ACL(optimized_image_key, upload.secure?)
|
|
|
|
end
|
|
|
|
else
|
|
|
|
upload.optimized_images.find_each do |optimized_image|
|
|
|
|
optimized_image_key = get_path_for_optimized_image(optimized_image)
|
|
|
|
update_ACL(optimized_image_key, upload.secure?)
|
|
|
|
end
|
2019-06-05 23:27:24 -04:00
|
|
|
end
|
2019-11-17 20:25:42 -05:00
|
|
|
|
|
|
|
true
|
2019-06-05 23:27:24 -04:00
|
|
|
end
|
|
|
|
|
2019-07-01 14:38:36 -04:00
|
|
|
    # Downloads the upload's S3 object to destination_path on the local
    # filesystem via the S3 helper.
    def download_file(upload, destination_path)
      s3_helper.download_file(get_upload_key(upload), destination_path)
    end
|
|
|
|
|
2020-01-12 18:12:27 -05:00
|
|
|
    # Migrates locally-stored uploads at source_path into S3 via
    # FileStore::ToS3Migration.
    #
    # If source_path is not the standard public uploads directory, any real
    # directory there is set aside (renamed with a random suffix) and a
    # symlink to source_path is put in its place, since the migration expects
    # files under public/uploads. The ensure block undoes the symlink and
    # restores the moved directory even if the migration raises.
    def copy_from(source_path)
      local_store = FileStore::LocalStore.new
      public_upload_path = File.join(local_store.public_dir, local_store.upload_path)

      # The migration to S3 and lots of other code expects files to exist in public/uploads,
      # so lets move them there before executing the migration.
      if public_upload_path != source_path
        if Dir.exist?(public_upload_path)
          # NOTE: assigning old_upload_path here (even conditionally) declares
          # the local, so the ensure block below can safely reference it.
          old_upload_path = "#{public_upload_path}_#{SecureRandom.hex}"
          FileUtils.mv(public_upload_path, old_upload_path)
        end
      end

      FileUtils.mkdir_p(File.expand_path("..", public_upload_path))
      FileUtils.symlink(source_path, public_upload_path)

      FileStore::ToS3Migration.new(
        s3_options: FileStore::ToS3Migration.s3_options_from_site_settings,
        migrate_to_multisite: Rails.configuration.multisite,
      ).migrate
    ensure
      # Clean up the temporary symlink and restore the original directory
      # if it was moved aside above.
      FileUtils.rm(public_upload_path) if File.symlink?(public_upload_path)
      FileUtils.mv(old_upload_path, public_upload_path) if old_upload_path
    end
|
|
|
|
|
2021-08-26 19:50:23 -04:00
|
|
|
def create_multipart(file_name, content_type, metadata: {})
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
key = temporary_upload_path(file_name)
|
FEATURE: Direct S3 multipart uploads for backups (#14736)
This PR introduces a new `enable_experimental_backup_uploads` site setting (default false and hidden), which when enabled alongside `enable_direct_s3_uploads` will allow for direct S3 multipart uploads of backup .tar.gz files.
To make multipart external uploads work with both the S3BackupStore and the S3Store, I've had to move several methods out of S3Store and into S3Helper, including:
* presigned_url
* create_multipart
* abort_multipart
* complete_multipart
* presign_multipart_part
* list_multipart_parts
Then, S3Store and S3BackupStore either delegate directly to S3Helper or have their own special methods to call S3Helper for these methods. FileStore.temporary_upload_path has also removed its dependence on upload_path, and can now be used interchangeably between the stores. A similar change was made in the frontend as well, moving the multipart related JS code out of ComposerUppyUpload and into a mixin of its own, so it can also be used by UppyUploadMixin.
Some changes to ExternalUploadManager had to be made here as well. The backup direct uploads do not need an Upload record made for them in the database, so they can be moved to their final S3 resting place when completing the multipart upload.
This changeset is not perfect; it introduces some special cases in UploadController to handle backups that was previously in BackupController, because UploadController is where the multipart routes are located. A subsequent pull request will pull these routes into a module or some other sharing pattern, along with hooks, so the backup controller and the upload controller (and any future controllers that may need them) can include these routes in a nicer way.
2021-11-10 17:25:31 -05:00
|
|
|
s3_helper.create_multipart(key, content_type, metadata: metadata)
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-24 18:46:54 -04:00
|
|
|
end
|
|
|
|
|
2018-11-26 14:24:51 -05:00
|
|
|
private
|
|
|
|
|
2021-07-27 18:42:25 -04:00
|
|
|
def presigned_get_url(
|
2020-07-02 23:42:36 -04:00
|
|
|
url,
|
|
|
|
force_download: false,
|
|
|
|
filename: false,
|
2022-05-25 19:53:01 -04:00
|
|
|
expires_in: SiteSetting.s3_presigned_get_url_expires_after_seconds
|
2020-07-02 23:42:36 -04:00
|
|
|
)
|
|
|
|
opts = { expires_in: expires_in }
|
|
|
|
|
2019-11-17 20:25:42 -05:00
|
|
|
if force_download && filename
|
|
|
|
opts[:response_content_disposition] = ActionDispatch::Http::ContentDisposition.format(
|
|
|
|
disposition: "attachment",
|
|
|
|
filename: filename,
|
|
|
|
)
|
|
|
|
end
|
|
|
|
|
2021-07-27 18:42:25 -04:00
|
|
|
obj = object_from_path(url)
|
2019-11-17 20:25:42 -05:00
|
|
|
obj.presigned_url(:get, opts)
|
|
|
|
end
|
|
|
|
|
2019-06-05 23:27:24 -04:00
|
|
|
def get_upload_key(upload)
|
|
|
|
if Rails.configuration.multisite
|
|
|
|
File.join(upload_path, "/", get_path_for_upload(upload))
|
|
|
|
else
|
|
|
|
get_path_for_upload(upload)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-11-17 20:25:42 -05:00
|
|
|
def update_ACL(key, secure)
|
|
|
|
begin
|
2021-07-27 18:42:25 -04:00
|
|
|
object_from_path(key).acl.put(acl: secure ? "private" : "public-read")
|
2019-11-17 20:25:42 -05:00
|
|
|
rescue Aws::S3::Errors::NoSuchKey
|
|
|
|
Rails.logger.warn("Could not update ACL on upload with key: '#{key}'. Upload is missing.")
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2018-11-26 14:24:51 -05:00
|
|
|
def list_missing(model, prefix)
|
|
|
|
connection = ActiveRecord::Base.connection.raw_connection
|
|
|
|
connection.exec("CREATE TEMP TABLE verified_ids(val integer PRIMARY KEY)")
|
|
|
|
marker = nil
|
2020-12-30 13:13:13 -05:00
|
|
|
files = s3_helper.list(prefix, marker)
|
2018-11-26 14:24:51 -05:00
|
|
|
|
|
|
|
while files.count > 0
|
|
|
|
verified_ids = []
|
|
|
|
|
|
|
|
files.each do |f|
|
2019-10-21 06:32:27 -04:00
|
|
|
id = model.where("url LIKE '%#{f.key}' AND etag = '#{f.etag}'").pluck_first(:id)
|
2018-11-26 14:24:51 -05:00
|
|
|
verified_ids << id if id.present?
|
|
|
|
marker = f.key
|
|
|
|
end
|
|
|
|
|
|
|
|
verified_id_clause =
|
|
|
|
verified_ids.map { |id| "('#{PG::Connection.escape_string(id.to_s)}')" }.join(",")
|
|
|
|
connection.exec("INSERT INTO verified_ids VALUES #{verified_id_clause}")
|
2020-12-30 13:13:13 -05:00
|
|
|
files = s3_helper.list(prefix, marker)
|
2018-11-26 14:24:51 -05:00
|
|
|
end
|
|
|
|
|
2019-01-31 23:40:48 -05:00
|
|
|
missing_uploads =
|
|
|
|
model.joins("LEFT JOIN verified_ids ON verified_ids.val = id").where(
|
|
|
|
"verified_ids.val IS NULL",
|
|
|
|
)
|
2018-11-26 14:24:51 -05:00
|
|
|
missing_count = missing_uploads.count
|
|
|
|
|
|
|
|
if missing_count > 0
|
|
|
|
missing_uploads.find_each { |upload| puts upload.url }
|
|
|
|
|
|
|
|
puts "#{missing_count} of #{model.count} #{model.name.underscore.pluralize} are missing"
|
|
|
|
end
|
|
|
|
ensure
|
2018-11-26 14:45:29 -05:00
|
|
|
connection.exec("DROP TABLE verified_ids") unless connection.nil?
|
2018-11-26 14:24:51 -05:00
|
|
|
end
|
2013-07-31 17:26:34 -04:00
|
|
|
end
|
|
|
|
end
|