diff --git a/go.mod b/go.mod
index 437f8b711..95d3a71bc 100644
--- a/go.mod
+++ b/go.mod
@@ -51,7 +51,7 @@ require (
github.com/hashicorp/hcl/v2 v2.9.1
github.com/hashicorp/packer-plugin-amazon v0.0.1
github.com/hashicorp/packer-plugin-docker v0.0.7
- github.com/hashicorp/packer-plugin-sdk v0.1.1
+ github.com/hashicorp/packer-plugin-sdk v0.1.2
github.com/hashicorp/vault/api v1.0.4
github.com/hetznercloud/hcloud-go v1.15.1
github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4
diff --git a/go.sum b/go.sum
index fe34b7020..8b0a06580 100644
--- a/go.sum
+++ b/go.sum
@@ -82,6 +82,7 @@ github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.1.0/go.mod h1:P+3VS0ETiQPyWOx3
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/Telmate/proxmox-api-go v0.0.0-20200715182505-ec97c70ba887/go.mod h1:OGWyIMJ87/k/GCz8CGiWB2HOXsOVDM6Lpe/nFPkC4IQ=
+github.com/Telmate/proxmox-api-go v0.0.0-20210320143302-fea68269e6b0/go.mod h1:ayPkdmEKnlssqLQ9K1BE1jlsaYhXVwkoduXI30oQF0I=
github.com/Telmate/proxmox-api-go v0.0.0-20210331182840-ff89a0cebcfa h1:n4g0+o4DDX6WGTRfdj1Ux+49vSwtxtqFGB5XtxoDphI=
github.com/Telmate/proxmox-api-go v0.0.0-20210331182840-ff89a0cebcfa/go.mod h1:ayPkdmEKnlssqLQ9K1BE1jlsaYhXVwkoduXI30oQF0I=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
@@ -126,6 +127,30 @@ github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zK
github.com/aws/aws-sdk-go v1.36.5/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.38.0 h1:mqnmtdW8rGIQmp2d0WRFLua0zW0Pel0P6/vd3gJuViY=
github.com/aws/aws-sdk-go v1.38.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go-v2 v1.2.1 h1:055XAi+MtmhyYX161p+jWRibkCb9YpI2ymXZiW1dwVY=
+github.com/aws/aws-sdk-go-v2 v1.2.1/go.mod h1:hTQc/9pYq5bfFACIUY9tc/2SYWd9Vnmw+testmuQeRY=
+github.com/aws/aws-sdk-go-v2/config v1.1.2 h1:H2r6cwMvvINFpEC55Y7jcNaR/oc7zYIChrG2497wmBI=
+github.com/aws/aws-sdk-go-v2/config v1.1.2/go.mod h1:77yIk+qmCS/94JlxbwV1d+YEyu6Z8FBlCGcSz3TdM6A=
+github.com/aws/aws-sdk-go-v2/credentials v1.1.2 h1:YoNqfhxAJGZI+lStIbqgx30UcCqQ86fr7FjTLUvrFOc=
+github.com/aws/aws-sdk-go-v2/credentials v1.1.2/go.mod h1:hofjw//lM0XLplgvzPPMA7oD0doQU1QpaIK1nweEEWg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3 h1:d3bKAGy4XdJyK8hz3Nx3WJJ4TCmYp2498G4mFY5wly0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3/go.mod h1:Zr1Mj+KUMGVQ+WJvTT68EZJxqhjiie2PWSPGEUPaNY0=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.0.3 h1:vhRq0752KGBMmLnVessDOpt+5XEdzM87hhiuwGiEpqc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.0.3/go.mod h1:9tFvXNMet5TrBa2bMLhZBvenXs4qKMqiG1n0MNR4FFA=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.2 h1:GO0pL4QvQmA0fXJe3MHVO+emtg31MYq5/8sebSWgE6A=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.2/go.mod h1:bYl7lGFQQdHia3uMQH4p6ImnuOeDNeUoydoXM5x8Yzw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3 h1:dST4y8pZKZdTPs4uwXmGCJmpycz1SHKmCSIhf3GqHEo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3/go.mod h1:C50Z41fJaJ7WgaeeCulOGAU3q4+4se4B3uOPFdhBi2I=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.1 h1:+WCVceRPiUsrui55mDByXOVremK1n3Hm8GnB4ZD3eco=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.1/go.mod h1:B+fb+BFbja6obFOHYmYE4iUMdej9aM2VGSpgdU1pn0M=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.2.1 h1:3qn6YVXpOCK9seQ8ZilDyMrhpEUaZNaJG8SXNiCvk+c=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.2.1/go.mod h1:3xGOyhtPPD/WXJUljmb5+ZXhNyHa4h6wgL6mWOF6S0c=
+github.com/aws/aws-sdk-go-v2/service/sso v1.1.2 h1:9BnjX/ALn5uLo2DbgkwMpUkPL1VLQVBXcjZxqJBhf44=
+github.com/aws/aws-sdk-go-v2/service/sso v1.1.2/go.mod h1:5yU1oE3+CVYYLUsaHt2AVU3CJJZ6ER4pwsrRD1L2KSc=
+github.com/aws/aws-sdk-go-v2/service/sts v1.1.2 h1:7Kxqov7uQeP8WUEO0iHz3j9Bh0E1rJrn6cf/OGfcDds=
+github.com/aws/aws-sdk-go-v2/service/sts v1.1.2/go.mod h1:zu7rotIY9P4Aoc6ytqLP9jeYrECDHUODB5Gbp+BSHl8=
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
@@ -185,8 +210,9 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
github.com/exoscale/egoscale v0.43.1 h1:Lhr0UOfg3t3Y56yh1DsYCjQuUHqFvsC8iUVqvub8+0Q=
github.com/exoscale/egoscale v0.43.1/go.mod h1:mpEXBpROAa/2i5GC0r33rfxG+TxSEka11g1PIXt9+zc=
-github.com/exoscale/packer-plugin-exoscale v0.1.0 h1:p4ymqF1tNiTuxgSdnEjGqXehMdDQbV7BPaLsMxGav24=
github.com/exoscale/packer-plugin-exoscale v0.1.0/go.mod h1:ZmJRkxsAlmEsVYOMxYPupDkax54uZ+ph0h3W59aIMZ8=
+github.com/exoscale/packer-plugin-exoscale v0.1.1 h1:NJ9UvMvSe3LK3H50hJv9nMG2reqgWKBAUhAEs4JJNso=
+github.com/exoscale/packer-plugin-exoscale v0.1.1/go.mod h1:5S07HizadGVKST/m0a5+aNmDiFfY7EbPvnkU4rJWRE8=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -270,6 +296,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
@@ -401,6 +428,7 @@ github.com/hashicorp/packer v1.6.7-0.20210126105722-aef4ced967ec/go.mod h1:2+Vo/
github.com/hashicorp/packer v1.6.7-0.20210208125835-f616955ebcb6/go.mod h1:7f5ZpTTRG53rQ58BcTADuTnpiBcB3wapuxl4sF2sGMM=
github.com/hashicorp/packer v1.6.7-0.20210217093213-201869d627bf/go.mod h1:+EWPPcqee4h8S/y913Dnta1eJkgiqsGXBQgB75A2qV0=
github.com/hashicorp/packer v1.7.0/go.mod h1:3KRJcwOctl2JaAGpQMI1bWQRArfWNWqcYjO6AOsVVGQ=
+github.com/hashicorp/packer v1.7.1/go.mod h1:ApnmMINvuhhnfPyTVqZu6jznDWPVYDJUw7e188DFCmo=
github.com/hashicorp/packer-plugin-amazon v0.0.1 h1:EuyjNK9bL7WhQeIJzhBJxOx8nyc61ai5UbOsb1PIVwI=
github.com/hashicorp/packer-plugin-amazon v0.0.1/go.mod h1:12c9msibyHdId+Mk/pCbdRb1KaLIhaNyxeJ6n8bZt30=
github.com/hashicorp/packer-plugin-docker v0.0.7 h1:hMTrH7vrkFIjphtbbtpuzffTzSjMNgxayo2DPLz9y+c=
@@ -414,8 +442,9 @@ github.com/hashicorp/packer-plugin-sdk v0.0.11/go.mod h1:GNb0WNs7zibb8vzUZce1As6
github.com/hashicorp/packer-plugin-sdk v0.0.12/go.mod h1:hs82OYeufirGG6KRENMpjBWomnIlte99X6wXAPThJ5I=
github.com/hashicorp/packer-plugin-sdk v0.0.14/go.mod h1:tNb3XzJPnjMl3QuUdKmF47B5ImerdTakalHzUAvW0aw=
github.com/hashicorp/packer-plugin-sdk v0.1.0/go.mod h1:CFsC20uZjtER/EnTn/CSMKD0kEdkqOVev8mtOmfnZiI=
-github.com/hashicorp/packer-plugin-sdk v0.1.1 h1:foqSy6m+2MsEf9ygNoBlIoi3SPvlkInS+yT0Uyj3yvw=
github.com/hashicorp/packer-plugin-sdk v0.1.1/go.mod h1:1d3nqB9LUsXMQaNUiL67Q+WYEtjsVcLNTX8ikVlpBrc=
+github.com/hashicorp/packer-plugin-sdk v0.1.2 h1:R/WKJw6BDwvjbcKeC3mZs+wSmdFHE8iK+qz+QnArPQk=
+github.com/hashicorp/packer-plugin-sdk v0.1.2/go.mod h1:KRjczE1/c9NV5Re+PXt3myJsVTI/FxEHpZjRjOH0Fug=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.2 h1:yJoyfZXo4Pk2p/M/viW+YLibBFiIbKoP79gu7kDAFP0=
github.com/hashicorp/serf v0.9.2/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
new file mode 100644
index 000000000..5f14d1162
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
new file mode 100644
index 000000000..fe63fedad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
@@ -0,0 +1,92 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+ "errors"
+ "strings"
+)
+
+const (
+ arnDelimiter = ":"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+
+ // errors
+ invalidPrefix = "arn: invalid prefix"
+ invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+ // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+ // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+ // (Beijing) region is "aws-cn".
+ Partition string
+
+ // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+ // namespaces, see
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+ Service string
+
+ // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+ // component might be omitted.
+ Region string
+
+ // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+ // ARNs for some resources don't require an account number, so this component might be omitted.
+ AccountID string
+
+ // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource —
+ // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
+ // resource name itself. Some services allows paths for resource names, as described in
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+ Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+ if !strings.HasPrefix(arn, arnPrefix) {
+ return ARN{}, errors.New(invalidPrefix)
+ }
+ sections := strings.SplitN(arn, arnDelimiter, arnSections)
+ if len(sections) != arnSections {
+ return ARN{}, errors.New(invalidSections)
+ }
+ return ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountID: sections[sectionAccountID],
+ Resource: sections[sectionResource],
+ }, nil
+}
+
+// IsARN returns whether the given string is an arn
+// by looking for whether the string starts with arn:
+func IsARN(arn string) bool {
+ return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+ return arnPrefix +
+ arn.Partition + arnDelimiter +
+ arn.Service + arnDelimiter +
+ arn.Region + arnDelimiter +
+ arn.AccountID + arnDelimiter +
+ arn.Resource
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
new file mode 100644
index 000000000..481aa1563
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -0,0 +1,88 @@
+package aws
+
+import (
+ "net/http"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// HTTPClient provides the interface to provide custom HTTPClients. Generally
+// *http.Client is sufficient for most use cases. The HTTPClient should not
+// follow redirects.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// A Config provides service configuration for service clients.
+type Config struct {
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+ // information on AWS regions.
+ Region string
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials CredentialsProvider
+
+ // The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+ // The SDK defaults to a BuildableClient allowing API clients to create
+ // copies of the HTTP Client for service specific customizations.
+ //
+ // Use a (*http.Client) for custom behavior. Using a custom http.Client
+ // will prevent the SDK from modifying the HTTP client.
+ HTTPClient HTTPClient
+
+ // An endpoint resolver that can be used to provide or override an endpoint for the given
+ // service and region Please see the `aws.EndpointResolver` documentation on usage.
+ EndpointResolver EndpointResolver
+
+ // Retryer is a function that provides a Retryer implementation. A Retryer guides how HTTP requests should be
+ // retried in case of recoverable failures. When nil the API client will use a default
+ // retryer.
+ //
+ // In general, the provider function should return a new instance of a Retyer if you are attempting
+ // to provide a consistent Retryer configuration across all clients. This will ensure that each client will be
+ // provided a new instance of the Retryer implementation, and will avoid issues such as sharing the same retry token
+ // bucket across services.
+ Retryer func() Retryer
+
+ // ConfigSources are the sources that were used to construct the Config.
+ // Allows for additional configuration to be loaded by clients.
+ ConfigSources []interface{}
+
+ // APIOptions provides the set of middleware mutations modify how the API
+ // client requests will be handled. This is useful for adding additional
+ // tracing data to a request, or changing behavior of the SDK's client.
+ APIOptions []func(*middleware.Stack) error
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard error.
+ Logger logging.Logger
+
+ // Configures the events that will be sent to the configured logger.
+ // This can be used to configure the logging of signing, retries, request, and responses
+ // of the SDK clients.
+ //
+ // See the ClientLogMode type documentation for the complete set of logging modes and available
+ // configuration.
+ ClientLogMode ClientLogMode
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c Config) Copy() Config {
+ cp := c
+ return cp
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 000000000..4d8e26ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,22 @@
+package aws
+
+import (
+ "context"
+ "time"
+)
+
+type suppressedContext struct {
+ context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+ return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+ return nil
+}
+
+func (s *suppressedContext) Err() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
new file mode 100644
index 000000000..1411a5c32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -0,0 +1,139 @@
+package aws
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options
+type CredentialsCacheOptions struct {
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // An ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired. This can cause an
+ // increased number of requests to refresh the credentials to occur.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials
+ // within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0.
+ //
+ // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to
+ // expire between 30 to 60 seconds prior to their actual expiration time.
+ //
+ // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+ // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+ // If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+ // If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+ ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+type CredentialsCache struct {
+ // provider is the CredentialProvider implementation to be wrapped by the CredentialCache.
+ provider CredentialsProvider
+
+ options CredentialsCacheOptions
+ creds atomic.Value
+ sf singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic
+// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+ options := CredentialsCacheOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.ExpiryWindow < 0 {
+ options.ExpiryWindow = 0
+ }
+
+ if options.ExpiryWindowJitterFrac < 0 {
+ options.ExpiryWindowJitterFrac = 0
+ } else if options.ExpiryWindowJitterFrac > 1 {
+ options.ExpiryWindowJitterFrac = 1
+ }
+
+ return &CredentialsCache{
+ provider: provider,
+ options: options,
+ }
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved, and not expired the cached credentials will be returned. If the
+// credentials have not been retrieved yet, or expired the provider's Retrieve
+// method will be called.
+//
+// Returns and error if the provider's retrieve method returns an error.
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+ if creds := p.getCreds(); creds != nil {
+ return *creds, nil
+ }
+
+ resCh := p.sf.DoChan("", func() (interface{}, error) {
+ return p.singleRetrieve(&suppressedContext{ctx})
+ })
+ select {
+ case res := <-resCh:
+ return res.Val.(Credentials), res.Err
+ case <-ctx.Done():
+ return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+ }
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+ if creds := p.getCreds(); creds != nil {
+ return *creds, nil
+ }
+
+ creds, err := p.provider.Retrieve(ctx)
+ if err == nil {
+ if creds.CanExpire {
+ randFloat64, err := sdkrand.CryptoRandFloat64()
+ if err != nil {
+ return Credentials{}, err
+ }
+ jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+ creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter))
+ }
+
+ p.creds.Store(&creds)
+ }
+
+ return creds, err
+}
+
+func (p *CredentialsCache) getCreds() *Credentials {
+ v := p.creds.Load()
+ if v == nil {
+ return nil
+ }
+
+ c := v.(*Credentials)
+ if c != nil && c.HasKeys() && !c.Expired() {
+ return c
+ }
+
+ return nil
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+ p.creds.Store((*Credentials)(nil))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 000000000..ce3868a9f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// SDK's external config loading attempt to resolve credentials.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO(),
+// config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+// )
+// if err != nil {
+// log.Fatalf("failed to load config, %v", err)
+// }
+//
+// client := s3.NewFromConfig(cfg)
+//
+// Alternatively you can leave the API client Option's `Credential` member to
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+// client := s3.New(s3.Options{
+// // Credentials defaults to a nil value.
+// })
+//
+// This can also be configured for specific operations calls too.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// log.Fatalf("failed to load config, %v", err)
+// }
+//
+// client := s3.NewFromConfig(config)
+//
+// result, err := client.GetObject(context.TODO(), s3.GetObject{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// }, func(o *s3.Options) {
+// o.Credentials = nil
+// // Or
+// o.Credentials = aws.AnonymousCredentials{}
+// })
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return an error, and cannot be used to sign a request. The AnonymousCredentials
+// type is used as a sentinel type instructing the AWS request signing
+// middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+ return Credentials{Source: "AnonymousCredentials"},
+ fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// A Credentials is the AWS credentials value for individual credential fields.
+type Credentials struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Source of the credentials
+ Source string
+
+ // Time the credentials will expire.
+ CanExpire bool
+ Expires time.Time
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+ if v.CanExpire {
+ // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+ // time is always based on reported wall-clock time.
+ return !v.Expires.After(sdk.NowTime().Round(0))
+ }
+
+ return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+ return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide
+// Credentials. A CredentialsProvider is required to manage its own
+// Expired state, and what being expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // Error is returned if the value were not obtainable, or empty.
+ Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+ return fn(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 000000000..befc3bee1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value use the "String" function. This makes it easy to
+// get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.ToString(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.ToStringSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 000000000..dadb2b407
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,113 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolvers. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+type Endpoint struct {
+ // The base URL endpoint the SDK API clients will use to make API calls to.
+ // The SDK will suffix URI path and query elements to this endpoint.
+ URL string
+
+ // Specifies if the endpoint's hostname can be modified by the SDK's API
+ // client.
+ //
+ // If the hostname is mutable the SDK API clients may modify any part of
+ // the hostname based on the requirements of the API, (e.g. adding, or
+ // removing content in the hostname). Such as, Amazon S3 API client
+ // prefixing "bucketname" to the hostname, or changing the
+ // hostname service name component from "s3." to "s3-accesspoint.dualstack."
+ // for the dualstack endpoint of an S3 Accesspoint resource.
+ //
+ // Care should be taken when providing a custom endpoint for an API. If the
+ // endpoint hostname is mutable, and the client cannot modify the endpoint
+ // correctly, the operation call will most likely fail, or have undefined
+ // behavior.
+ //
+ // If hostname is immutable, the SDK API clients will not modify the
+ // hostname of the URL. This may cause the API client not to function
+ // correctly if the API requires the operation specific hostname values
+ // to be used by the client.
+ //
+ // This flag does not modify the API client's behavior if this endpoint
+ // will be used instead of Endpoint Discovery, or if the endpoint will be
+ // used to perform Endpoint Discovery. That behavior is configured via the
+ // API Client's Options.
+ HostnameImmutable bool
+
+ // The AWS partition the endpoint belongs to.
+ PartitionID string
+
+ // The service name that should be used for signing the requests to the
+ // endpoint.
+ SigningName string
+
+ // The region that should be used for signing the request to the endpoint.
+ SigningRegion string
+
+ // The signing method that should be used for signing the requests to the
+ // endpoint.
+ SigningMethod string
+
+ // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+ // When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+ // If source is not provided when providing a custom endpoint, the SDK may not
+ // perform required host mutations correctly. Source should be used along with
+ // HostnameImmutable property as per the usage requirement.
+ Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+type EndpointSource int
+
+const (
+ // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+ EndpointSourceServiceMetadata EndpointSource = iota
+
+ // EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
+ // user provides a custom endpoint to be used by the SDK.
+ EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fall back and attempt to use its internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+ Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+ return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+ return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+type EndpointResolver interface {
+ ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+ return e(service, region)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 000000000..f390a08f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+ return "an AWS region is required, but was not found"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
new file mode 100644
index 000000000..72e29c354
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
@@ -0,0 +1,344 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+ "github.com/aws/smithy-go/ptr"
+ "time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+ return ptr.ToBool(p)
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+ return ptr.ToBoolSlice(vs)
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+ return ptr.ToBoolMap(vs)
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+ return ptr.ToByte(p)
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+ return ptr.ToByteSlice(vs)
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+ return ptr.ToByteMap(vs)
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+ return ptr.ToString(p)
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+ return ptr.ToStringSlice(vs)
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+ return ptr.ToStringMap(vs)
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns a int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+ return ptr.ToInt(p)
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+ return ptr.ToIntSlice(vs)
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+ return ptr.ToIntMap(vs)
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns a int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+ return ptr.ToInt8(p)
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+ return ptr.ToInt8Slice(vs)
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+ return ptr.ToInt8Map(vs)
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns a int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+ return ptr.ToInt16(p)
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+ return ptr.ToInt16Slice(vs)
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+ return ptr.ToInt16Map(vs)
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns a int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+ return ptr.ToInt32(p)
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+ return ptr.ToInt32Slice(vs)
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+ return ptr.ToInt32Map(vs)
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns a int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+ return ptr.ToInt64(p)
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+ return ptr.ToInt64Slice(vs)
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+ return ptr.ToInt64Map(vs)
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+ return ptr.ToUint(p)
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+ return ptr.ToUintSlice(vs)
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+ return ptr.ToUintMap(vs)
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+ return ptr.ToUint8(p)
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+ return ptr.ToUint8Slice(vs)
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+ return ptr.ToUint8Map(vs)
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+ return ptr.ToUint16(p)
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+ return ptr.ToUint16Slice(vs)
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+ return ptr.ToUint16Map(vs)
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+ return ptr.ToUint32(p)
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+ return ptr.ToUint32Slice(vs)
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+ return ptr.ToUint32Map(vs)
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+ return ptr.ToUint64(p)
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+ return ptr.ToUint64Slice(vs)
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+ return ptr.ToUint64Map(vs)
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+ return ptr.ToFloat32(p)
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+ return ptr.ToFloat32Slice(vs)
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+ return ptr.ToFloat32Map(vs)
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+ return ptr.ToFloat64(p)
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+ return ptr.ToFloat64Slice(vs)
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+ return ptr.ToFloat64Map(vs)
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+ return ptr.ToTime(p)
+}
+
+// ToTimeSlice returns a slice of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Time
+// zero value if the pointer was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+ return ptr.ToTimeSlice(vs)
+}
+
+// ToTimeMap returns a map of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. The time.Time
+// zero value is used if the pointer was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+ return ptr.ToTimeMap(vs)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
new file mode 100644
index 000000000..f6abb0a63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
@@ -0,0 +1,84 @@
+// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+// clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+// clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+ LogSigning ClientLogMode = 1 << (64 - 1 - iota)
+ LogRetries
+ LogRequest
+ LogRequestWithBody
+ LogResponse
+ LogResponseWithBody
+)
+
+// IsSigning returns whether the Signing logging mode bit is set
+func (m ClientLogMode) IsSigning() bool {
+ return m&LogSigning != 0
+}
+
+// IsRetries returns whether the Retries logging mode bit is set
+func (m ClientLogMode) IsRetries() bool {
+ return m&LogRetries != 0
+}
+
+// IsRequest returns whether the Request logging mode bit is set
+func (m ClientLogMode) IsRequest() bool {
+ return m&LogRequest != 0
+}
+
+// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set
+func (m ClientLogMode) IsRequestWithBody() bool {
+ return m&LogRequestWithBody != 0
+}
+
+// IsResponse returns whether the Response logging mode bit is set
+func (m ClientLogMode) IsResponse() bool {
+ return m&LogResponse != 0
+}
+
+// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set
+func (m ClientLogMode) IsResponseWithBody() bool {
+ return m&LogResponseWithBody != 0
+}
+
+// ClearSigning clears the Signing logging mode bit
+func (m *ClientLogMode) ClearSigning() {
+ *m &^= LogSigning
+}
+
+// ClearRetries clears the Retries logging mode bit
+func (m *ClientLogMode) ClearRetries() {
+ *m &^= LogRetries
+}
+
+// ClearRequest clears the Request logging mode bit
+func (m *ClientLogMode) ClearRequest() {
+ *m &^= LogRequest
+}
+
+// ClearRequestWithBody clears the RequestWithBody logging mode bit
+func (m *ClientLogMode) ClearRequestWithBody() {
+ *m &^= LogRequestWithBody
+}
+
+// ClearResponse clears the Response logging mode bit
+func (m *ClientLogMode) ClearResponse() {
+ *m &^= LogResponse
+}
+
+// ClearResponseWithBody clears the ResponseWithBody logging mode bit
+func (m *ClientLogMode) ClearResponseWithBody() {
+ *m &^= LogResponseWithBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
new file mode 100644
index 000000000..522bbadea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
@@ -0,0 +1,77 @@
+// +build clientlogmode
+
+package main
+
+import (
+ "log"
+ "os"
+ "text/template"
+)
+
+var config = struct {
+ ModeBits []string
+}{
+ // Items should be appended only to keep bit-flag positions stable
+ ModeBits: []string{
+ "Signing",
+ "Retries",
+ "Request",
+ "RequestWithBody",
+ "Response",
+ "ResponseWithBody",
+ },
+}
+
+var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{
+ "symbolName": func(name string) string {
+ return "Log" + name
+ },
+}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+// clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+// clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+{{- range $index, $field := .ModeBits }}
+ {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }}
+{{- end }}
+)
+
+{{ range $_, $field := .ModeBits }}
+// Is{{- $field }} returns whether the {{ $field }} logging mode bit is set
+func (m ClientLogMode) Is{{- $field }}() bool {
+ return m&{{- (symbolName $field) }} != 0
+}
+{{ end }}
+
+{{ range $_, $field := .ModeBits }}
+// Clear{{- $field }} clears the {{ $field }} logging mode bit
+func (m *ClientLogMode) Clear{{- $field }}() {
+ *m &^= {{- (symbolName $field) }}
+}
+{{ end }}
+`))
+
+func main() {
+ file, err := os.Create("logging.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ err = tmpl.Execute(file, config)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
new file mode 100644
index 000000000..282012174
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
@@ -0,0 +1,167 @@
+package middleware
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// RegisterServiceMetadata registers metadata about the service and operation into the middleware context
+// so that it is available at runtime for other middleware to introspect.
+type RegisterServiceMetadata struct {
+ ServiceID string
+ SigningName string
+ Region string
+ OperationName string
+}
+
+// ID returns the middleware identifier.
+func (s *RegisterServiceMetadata) ID() string {
+ return "RegisterServiceMetadata"
+}
+
+// HandleInitialize registers service metadata information into the middleware context, allowing for introspection.
+func (s RegisterServiceMetadata) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+ if len(s.ServiceID) > 0 {
+ ctx = SetServiceID(ctx, s.ServiceID)
+ }
+ if len(s.SigningName) > 0 {
+ ctx = SetSigningName(ctx, s.SigningName)
+ }
+ if len(s.Region) > 0 {
+ ctx = setRegion(ctx, s.Region)
+ }
+ if len(s.OperationName) > 0 {
+ ctx = setOperationName(ctx, s.OperationName)
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+// service metadata keys for storing and lookup of runtime stack information.
+type (
+ serviceIDKey struct{}
+ signingNameKey struct{}
+ signingRegionKey struct{}
+ regionKey struct{}
+ operationNameKey struct{}
+ partitionIDKey struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+ return v
+}
+
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningName(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+ return v
+}
+
+// GetSigningRegion retrieves the region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningRegion(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+ return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+ return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+ return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+ return v
+}
+
+// SetSigningName set or modifies the signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningName(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns an endpoint source if set on context
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+ v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+ return v
+}
+
+// SetEndpointSource sets endpoint source on context
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+ return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
new file mode 100644
index 000000000..9bd0dfb15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -0,0 +1,168 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyrand "github.com/aws/smithy-go/rand"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation
+// invocation.
+type ClientRequestID struct{}
+
+// ID the identifier for the ClientRequestID
+func (r *ClientRequestID) ID() string {
+ return "ClientRequestID"
+}
+
+// HandleBuild attaches a unique operation invocation id for the operation to the request
+func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", req)
+ }
+
+ invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
+ if err != nil {
+ return out, metadata, err
+ }
+
+ const invocationIDHeader = "Amz-Sdk-Invocation-Id"
+ req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID)
+
+ return next.HandleBuild(ctx, in)
+}
+
+// RecordResponseTiming records the response timing for the SDK client requests.
+type RecordResponseTiming struct{}
+
+// ID is the middleware identifier
+func (a *RecordResponseTiming) ID() string {
+ return "RecordResponseTiming"
+}
+
+// HandleDeserialize calculates response metadata and clock skew
+func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ responseAt := sdk.NowTime()
+ setResponseAt(&metadata, responseAt)
+
+ var serverTime time.Time
+
+ switch resp := out.RawResponse.(type) {
+ case *smithyhttp.Response:
+ respDateHeader := resp.Header.Get("Date")
+ if len(respDateHeader) == 0 {
+ break
+ }
+ var parseErr error
+ serverTime, parseErr = smithyhttp.ParseTime(respDateHeader)
+ if parseErr != nil {
+ logger := middleware.GetLogger(ctx)
+ logger.Logf(logging.Warn, "failed to parse response Date header value, got %v",
+ parseErr.Error())
+ break
+ }
+ setServerTime(&metadata, serverTime)
+ }
+
+ if !serverTime.IsZero() {
+ attemptSkew := serverTime.Sub(responseAt)
+ setAttemptSkew(&metadata, attemptSkew)
+ }
+
+ return out, metadata, err
+}
+
+type responseAtKey struct{}
+
+// GetResponseAt returns the time response was received at.
+func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) {
+ v, ok = metadata.Get(responseAtKey{}).(time.Time)
+ return v, ok
+}
+
+// setResponseAt sets the response time on the metadata.
+func setResponseAt(metadata *middleware.Metadata, v time.Time) {
+ metadata.Set(responseAtKey{}, v)
+}
+
+type serverTimeKey struct{}
+
+// GetServerTime returns the server time for response.
+func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
+ v, ok = metadata.Get(serverTimeKey{}).(time.Time)
+ return v, ok
+}
+
+// setServerTime sets the server time on the metadata.
+func setServerTime(metadata *middleware.Metadata, v time.Time) {
+ metadata.Set(serverTimeKey{}, v)
+}
+
+type attemptSkewKey struct{}
+
+// GetAttemptSkew returns Attempt clock skew for response from metadata.
+func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
+ v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
+ return v, ok
+}
+
+// setAttemptSkew sets the attempt clock skew on the metadata.
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+ metadata.Set(attemptSkewKey{}, v)
+}
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// addRawResponse middleware adds raw response on to the metadata
+type addRawResponse struct{}
+
+// ID the identifier for the ClientRequestID
+func (m *addRawResponse) ID() string {
+ return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds raw response on the middleware metadata
+func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ metadata.Set(rawResponseKey{}, out.RawResponse)
+ return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// store raw response on to the metadata.
+func AddRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
+}
+
+// GetRawResponse returns raw response set on metadata
+func GetRawResponse(metadata middleware.Metadata) interface{} {
+ return metadata.Get(rawResponseKey{})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
new file mode 100644
index 000000000..dd3391fe4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
@@ -0,0 +1,27 @@
+package middleware
+
+import (
+ "github.com/aws/smithy-go/middleware"
+)
+
+// requestIDKey is used to retrieve request id from response metadata
+type requestIDKey struct{}
+
+// SetRequestIDMetadata sets the provided request id over middleware metadata
+func SetRequestIDMetadata(metadata *middleware.Metadata, id string) {
+ metadata.Set(requestIDKey{}, id)
+}
+
+// GetRequestIDMetadata retrieves the request id from middleware metadata
+// returns string and bool indicating value of request id, whether request id was set.
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
+ if !metadata.Has(requestIDKey{}) {
+ return "", false
+ }
+
+ v, ok := metadata.Get(requestIDKey{}).(string)
+ if !ok {
+ return "", true
+ }
+ return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
new file mode 100644
index 000000000..7ce48c611
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddRequestIDRetrieverMiddleware adds request id retriever middleware
+func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ // add error wrapper middleware before operation deserializers so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type requestIDRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *requestIDRetriever) ID() string {
+ return "RequestIDRetriever"
+}
+
+func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // Different header which can map to request id
+ requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
+
+ for _, h := range requestIDHeaderList {
+ // check for headers known to contain Request id
+ if v := resp.Header.Get(h); len(v) != 0 {
+ // set reqID on metadata for successful responses.
+ SetRequestIDMetadata(&metadata, v)
+ break
+ }
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
new file mode 100644
index 000000000..80418d72a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -0,0 +1,256 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
+
+// SDKAgentKeyType is the metadata type to add to the SDK agent string
+type SDKAgentKeyType int
+
+// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
+// be mapped to AdditionalMetadata.
+const (
+ _ SDKAgentKeyType = iota
+ APIMetadata
+ OperatingSystemMetadata
+ LanguageMetadata
+ EnvironmentMetadata
+ FeatureMetadata
+ ConfigMetadata
+ FrameworkMetadata
+ AdditionalMetadata
+ ApplicationIdentifier
+)
+
+func (k SDKAgentKeyType) string() string {
+ switch k {
+ case APIMetadata:
+ return "api"
+ case OperatingSystemMetadata:
+ return "os"
+ case LanguageMetadata:
+ return "lang"
+ case EnvironmentMetadata:
+ return "exec-env"
+ case FeatureMetadata:
+ return "ft"
+ case ConfigMetadata:
+ return "cfg"
+ case FrameworkMetadata:
+ return "lib"
+ case ApplicationIdentifier:
+ return "app"
+ case AdditionalMetadata:
+ fallthrough
+ default:
+ return "md"
+ }
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+
+// requestUserAgent is a build middleware that set the User-Agent for the request.
+type requestUserAgent struct {
+ sdkAgent, userAgent *smithyhttp.UserAgentBuilder
+}
+
+// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
+// request.
+//
+// User-Agent example:
+// aws-sdk-go-v2/1.2.3
+//
+// X-Amz-User-Agent example:
+// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
+func newRequestUserAgent() *requestUserAgent {
+ userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
+ addProductName(userAgent)
+ addProductName(sdkAgent)
+
+ r := &requestUserAgent{
+ sdkAgent: sdkAgent,
+ userAgent: userAgent,
+ }
+
+ addSDKMetadata(r)
+
+ return r
+}
+
+func getNormalizedOSName() (os string) {
+ switch runtime.GOOS {
+ case "android":
+ os = "android"
+ case "linux":
+ os = "linux"
+ case "windows":
+ os = "windows"
+ case "darwin":
+ // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
+ // For now declare this as "other" until we have a better detection mechanism.
+ fallthrough
+ default:
+ os = "other"
+ }
+ return os
+}
+
+func addSDKMetadata(r *requestUserAgent) {
+ r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
+ r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
+ r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
+ r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH)
+ if ev := os.Getenv(execEnvVar); len(ev) > 0 {
+ r.AddSDKAgentKey(EnvironmentMetadata, ev)
+ }
+}
+
+func addProductName(builder *smithyhttp.UserAgentBuilder) {
+ builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
+}
+
+// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKey(key string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddUserAgentKey(key)
+ return nil
+ }
+}
+
+// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddUserAgentKeyValue(key, value)
+ return nil
+ }
+}
+
+// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddSDKAgentKey(keyType, key)
+ return nil
+ }
+}
+
+// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
+ return nil
+ }
+}
+
+// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present.
+func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
+ _, err := getOrAddRequestUserAgent(stack)
+ return err
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
+ id := (*requestUserAgent)(nil).ID()
+ bm, ok := stack.Build.Get(id)
+ if !ok {
+ bm = newRequestUserAgent()
+ err := stack.Build.Add(bm, middleware.After)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ requestUserAgent, ok := bm.(*requestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
+ }
+
+ return requestUserAgent, nil
+}
+
+// AddUserAgentKey adds the component identified by name to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKey(key string) {
+ u.userAgent.AddKey(key)
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+ u.userAgent.AddKeyValue(key, value)
+}
+
+// AddUserAgentKey adds the component identified by name to the User-Agent string.
+func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+ u.sdkAgent.AddKey(keyType.string() + "/" + key)
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+ u.sdkAgent.AddKeyValue(keyType.string()+"/"+key, value)
+}
+
+// ID the name of the middleware.
+func (u *requestUserAgent) ID() string {
+ return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ switch req := in.Request.(type) {
+ case *smithyhttp.Request:
+ u.addHTTPUserAgent(req)
+ u.addHTTPSDKAgent(req)
+ default:
+ return out, metadata, fmt.Errorf("unknown transport type %T", in)
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+ const userAgent = "User-Agent"
+ updateHTTPHeader(request, userAgent, u.userAgent.Build())
+}
+
+func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+ const sdkAgent = "X-Amz-User-Agent"
+ updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
+}
+
+func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
+ var current string
+ if v := request.Header[header]; len(v) > 0 {
+ current = v[0]
+ }
+ if len(current) > 0 {
+ current = value + " " + current
+ } else {
+ current = value
+ }
+ request.Header[header] = append(request.Header[header][:0], current)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
new file mode 100644
index 000000000..77dd4d8db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -0,0 +1,61 @@
+package query
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+// ListName.member.1=foo
+// &ListName.member.2=bar
+// &Listname.member.3=baz
+type Array struct {
+ // The query values to add the array to.
+ values url.Values
+ // The array's prefix, which includes the names of all parent structures
+ // and ends with the name of the list. For example, the prefix might be
+ // "ParentStructure.ListName". This prefix will be used to form the full
+ // keys for each element in the list. For example, an entry might have the
+ // key "ParentStructure.ListName.member.MemberName.1".
+ //
+ // While this is currently represented as a string that gets added to, it
+ // could also be represented as a stack that only gets condensed into a
+ // string when a finalized key is created. This could potentially reduce
+ // allocations.
+ prefix string
+ // Whether the list is flat or not. A list that is not flat will produce the
+ // following entry to the url.Values for a given entry:
+ // ListName.MemberName.1=value
+ // A list that is flat will produce the following:
+ // ListName.1=value
+ flat bool
+ // The location name of the member. In most cases this should be "member".
+ memberName string
+ // Elements are stored in values, so we keep track of the list size here.
+ size int32
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+ return &Array{
+ values: values,
+ prefix: prefix,
+ flat: flat,
+ memberName: memberName,
+ }
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
+func (a *Array) Value() Value {
+ // Query lists start a 1, so adjust the size first
+ a.size++
+ prefix := a.prefix
+ if !a.flat {
+ prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+ }
+ // Lists can't have flat members
+ return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 000000000..2ecf9241c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+ "io"
+ "net/url"
+ "sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+ // The query values that will be built up to manage encoding.
+ values url.Values
+ // The writer that the encoded body will be written to.
+ writer io.Writer
+ Value
+}
+
+// NewEncoder returns a new Query body encoder
+func NewEncoder(writer io.Writer) *Encoder {
+ values := url.Values{}
+ return &Encoder{
+ values: values,
+ writer: writer,
+ Value: newBaseValue(values),
+ }
+}
+
+// Encode returns the []byte slice representing the current
+// state of the Query encoder.
+func (e Encoder) Encode() error {
+ ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+ if !ok {
+ // Fall back to less optimal byte slice casting if WriteString isn't available.
+ ws = &wrapWriteString{writer: e.writer}
+ }
+
+ // Get the keys and sort them to have a stable output
+ keys := make([]string, 0, len(e.values))
+ for k := range e.values {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ isFirstEntry := true
+ for _, key := range keys {
+ queryValues := e.values[key]
+ escapedKey := url.QueryEscape(key)
+ for _, value := range queryValues {
+ if !isFirstEntry {
+ if _, err := ws.WriteString(`&`); err != nil {
+ return err
+ }
+ } else {
+ isFirstEntry = false
+ }
+ if _, err := ws.WriteString(escapedKey); err != nil {
+ return err
+ }
+ if _, err := ws.WriteString(`=`); err != nil {
+ return err
+ }
+ if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// wrapWriteString wraps an io.Writer to provide a WriteString method
+// where one is not available.
+type wrapWriteString struct {
+ writer io.Writer
+}
+
+// WriteString writes a string to the wrapped writer by casting it to
+// a byte array first.
+func (w wrapWriteString) WriteString(v string) (int, error) {
+ return w.writer.Write([]byte(v))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
new file mode 100644
index 000000000..ab91e357b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
@@ -0,0 +1,78 @@
+package query
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// Map represents the encoding of Query maps. A Query map is a representation
+// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
+// A Map differs from an Object in that the set of keys is not fixed, in that
+// the values must all be of the same type, and that map entries are ordered.
+// A serialized map might look like the following:
+//
+// MapName.entry.1.key=Foo
+// &MapName.entry.1.value=spam
+// &MapName.entry.2.key=Bar
+// &MapName.entry.2.value=eggs
+type Map struct {
+ // The query values to add the map to.
+ values url.Values
+ // The map's prefix, which includes the names of all parent structures
+ // and ends with the name of the object. For example, the prefix might be
+ // "ParentStructure.MapName". This prefix will be used to form the full
+ // keys for each key-value pair of the map. For example, a value might have
+ // the key "ParentStructure.MapName.1.value".
+ //
+ // While this is currently represented as a string that gets added to, it
+ // could also be represented as a stack that only gets condensed into a
+ // string when a finalized key is created. This could potentially reduce
+ // allocations.
+ prefix string
+ // Whether the map is flat or not. A map that is not flat will produce the
+ // following entries to the url.Values for a given key-value pair:
+ // MapName.entry.1.KeyLocationName=mykey
+ // MapName.entry.1.ValueLocationName=myvalue
+ // A map that is flat will produce the following:
+ // MapName.1.KeyLocationName=mykey
+ // MapName.1.ValueLocationName=myvalue
+ flat bool
+ // The location name of the key. In most cases this should be "key".
+ keyLocationName string
+ // The location name of the value. In most cases this should be "value".
+ valueLocationName string
+ // Elements are stored in values, so we keep track of the list size here.
+ size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+ return &Map{
+ values: values,
+ prefix: prefix,
+ flat: flat,
+ keyLocationName: keyLocationName,
+ valueLocationName: valueLocationName,
+ }
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+ // Query lists start a 1, so adjust the size first
+ m.size++
+ var key string
+ var value string
+ if m.flat {
+ key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+ value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+ } else {
+ key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+ value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+ }
+
+ // The key can only be a string, so we just go ahead and set it here
+ newValue(m.values, key, false).String(name)
+
+ // Maps can't have flat members
+ return newValue(m.values, value, false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
new file mode 100644
index 000000000..360344791
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
@@ -0,0 +1,62 @@
+package query
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
+ return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
+}
+
+type asGetRequest struct{}
+
+func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
+
+func (m *asGetRequest) HandleSerialize(
+ ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
+ }
+
+ req.Method = "GET"
+
+ // If the stream is not set, nothing else to do.
+ stream := req.GetStream()
+ if stream == nil {
+ return next.HandleSerialize(ctx, input)
+ }
+
+ // Clear the stream since there will not be any body.
+ req.Header.Del("Content-Type")
+ req, err = req.SetStream(nil)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable update request body %w", err)
+ }
+ input.Request = req
+
+ // Update request query with the body's query string value.
+ delim := ""
+ if len(req.URL.RawQuery) != 0 {
+ delim = "&"
+ }
+
+ b, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to get request body %w", err)
+ }
+ req.URL.RawQuery += delim + string(b)
+
+ return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
new file mode 100644
index 000000000..debb413de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
@@ -0,0 +1,56 @@
+package query
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// Object represents the encoding of Query structures and unions. A Query
+// object is a representation of a mapping of string keys to arbitrary
+// values where there is a fixed set of keys whose values each have their
+// own known type. A serialized object might look like the following:
+//
+// ObjectName.Foo=value
+// &ObjectName.Bar=5
+type Object struct {
+ // The query values to add the object to.
+ values url.Values
+ // The object's prefix, which includes the names of all parent structures
+ // and ends with the name of the object. For example, the prefix might be
+ // "ParentStructure.ObjectName". This prefix will be used to form the full
+ // keys for each member of the object. For example, a member might have the
+ // key "ParentStructure.ObjectName.MemberName".
+ //
+ // While this is currently represented as a string that gets added to, it
+ // could also be represented as a stack that only gets condensed into a
+ // string when a finalized key is created. This could potentially reduce
+ // allocations.
+ prefix string
+}
+
+func newObject(values url.Values, prefix string) *Object {
+ return &Object{
+ values: values,
+ prefix: prefix,
+ }
+}
+
+// Key adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (o *Object) Key(name string) Value {
+ return o.key(name, false)
+}
+
+// FlatKey adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type. The
+// value will be flattened if it is a map or array.
+func (o *Object) FlatKey(name string) Value {
+ return o.key(name, true)
+}
+
+func (o *Object) key(name string, flatValue bool) Value {
+ if o.prefix != "" {
+ return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
+ }
+ return newValue(o.values, name, flatValue)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
new file mode 100644
index 000000000..302525ab1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
@@ -0,0 +1,106 @@
+package query
+
+import (
+ "math/big"
+ "net/url"
+
+ "github.com/aws/smithy-go/encoding/httpbinding"
+)
+
// Value represents a Query Value type.
type Value struct {
	// The query values to add the value to.
	values url.Values
	// The value's key, which will form the prefix for complex types.
	key string
	// Whether the value should be flattened or not if it's a flattenable type.
	flat bool
	// queryValue performs the actual scalar encoding into values under key.
	queryValue httpbinding.QueryValue
}

// newValue returns a Value bound to key within values. flat is only consulted
// by the nested Array and Map encoders; scalars go through queryValue.
//
// NOTE(review): the third argument to NewQueryValue is hard-coded to false
// rather than passing flat — confirm against httpbinding.NewQueryValue's
// parameter semantics.
func newValue(values url.Values, key string, flat bool) Value {
	return Value{
		values:     values,
		key:        key,
		flat:       flat,
		queryValue: httpbinding.NewQueryValue(values, key, false),
	}
}

// newBaseValue returns a Value with no key set; its queryValue is backed by
// nil values, so scalar encoding through it is a no-op destination.
func newBaseValue(values url.Values) Value {
	return Value{
		values:     values,
		queryValue: httpbinding.NewQueryValue(nil, "", false),
	}
}

// Array returns a new Array encoder rooted at this value's key.
func (qv Value) Array(locationName string) *Array {
	return newArray(qv.values, qv.key, qv.flat, locationName)
}

// Object returns a new Object encoder rooted at this value's key.
func (qv Value) Object() *Object {
	return newObject(qv.values, qv.key)
}

// Map returns a new Map encoder rooted at this value's key.
func (qv Value) Map(keyLocationName string, valueLocationName string) *Map {
	return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName)
}

// Base64EncodeBytes encodes v as a base64 query string value.
// This is intended to enable compatibility with the JSON encoder.
func (qv Value) Base64EncodeBytes(v []byte) {
	qv.queryValue.Blob(v)
}

// Boolean encodes v as a query string value.
func (qv Value) Boolean(v bool) {
	qv.queryValue.Boolean(v)
}

// String encodes v as a query string value.
func (qv Value) String(v string) {
	qv.queryValue.String(v)
}

// Byte encodes v as a query string value.
func (qv Value) Byte(v int8) {
	qv.queryValue.Byte(v)
}

// Short encodes v as a query string value.
func (qv Value) Short(v int16) {
	qv.queryValue.Short(v)
}

// Integer encodes v as a query string value.
func (qv Value) Integer(v int32) {
	qv.queryValue.Integer(v)
}

// Long encodes v as a query string value.
func (qv Value) Long(v int64) {
	qv.queryValue.Long(v)
}

// Float encodes v as a query string value.
func (qv Value) Float(v float32) {
	qv.queryValue.Float(v)
}

// Double encodes v as a query string value.
func (qv Value) Double(v float64) {
	qv.queryValue.Double(v)
}

// BigInteger encodes v as a query string value.
func (qv Value) BigInteger(v *big.Int) {
	qv.queryValue.BigInteger(v)
}

// BigDecimal encodes v as a query string value.
func (qv Value) BigDecimal(v *big.Float) {
	qv.queryValue.BigDecimal(v)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
new file mode 100644
index 000000000..1bce78a4d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
@@ -0,0 +1,85 @@
+package restjson
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+
+ "github.com/aws/smithy-go"
+)
+
// GetErrorInfo looks for the optional code, __type, and message members in
// the JSON body and returns whichever are present. It is useful for
// identifying the error code and message in a REST JSON error response.
// An empty body (io.EOF) is not an error and yields empty results.
func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
	var body struct {
		Code    string
		Type    string `json:"__type"`
		Message string
	}

	if err = decoder.Decode(&body); err != nil {
		if err == io.EOF {
			return "", "", nil
		}
		return "", "", err
	}

	// Code takes precedence over __type when both are present.
	switch {
	case len(body.Code) != 0:
		errorType = body.Code
	case len(body.Type) != 0:
		errorType = body.Type
	}

	message = body.Message

	// Strip namespace/detail decorations from the code.
	if len(errorType) != 0 {
		errorType = SanitizeErrorCode(errorType)
	}

	return errorType, message, nil
}

// SanitizeErrorCode sanitizes the errorCode string.
// If a `:` character is present, only the contents before the first `:` are
// kept. If a `#` character is present, only the contents after the first `#`
// are kept.
func SanitizeErrorCode(errorCode string) string {
	if idx := strings.Index(errorCode, ":"); idx >= 0 {
		errorCode = errorCode[:idx]
	}

	if idx := strings.Index(errorCode, "#"); idx >= 0 {
		errorCode = errorCode[idx+1:]
	}

	return errorCode
}
+
+// GetSmithyGenericAPIError returns smithy generic api error and an error interface.
+// Takes in json decoder, and error Code string as args. The function retrieves error message
+// and error code from the decoder body. If errorCode of length greater than 0 is passed in as
+// an argument, it is used instead.
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
+ errorType, message, err := GetErrorInfo(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(errorCode) == 0 {
+ errorCode = errorType
+ }
+
+ return &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: message,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
new file mode 100644
index 000000000..c228f7d87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -0,0 +1,56 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
// ErrorComponents represents the error response fields
// that will be deserialized from an xml error response body.
type ErrorComponents struct {
	// Code is the error code identifying the failure.
	Code string
	// Message is the human-readable error description.
	Message string
	// RequestID is the ID of the failed request.
	RequestID string
}
+
// GetErrorResponseComponents returns the error fields from an xml error
// response body read from r.
//
// When noErrorWrapping is true the fields are expected at the body's top
// level; otherwise they are expected nested within an <Error> element.
// An io.EOF from the decoder (empty body) is tolerated and yields
// zero-value components.
func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
	if noErrorWrapping {
		var errResponse noWrappedErrorResponse
		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
		}
		return ErrorComponents{
			Code:      errResponse.Code,
			Message:   errResponse.Message,
			RequestID: errResponse.RequestID,
		}, nil
	}

	var errResponse wrappedErrorResponse
	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
	}
	return ErrorComponents{
		Code:      errResponse.Code,
		Message:   errResponse.Message,
		RequestID: errResponse.RequestID,
	}, nil
}
+
// noWrappedErrorResponse represents the error response body with
// no internal <Error/> wrapping: the fields appear at the top level.
//
// This declaration was missing even though GetErrorResponseComponents
// references it; restored so the package compiles.
type noWrappedErrorResponse struct {
	Code      string `xml:"Code"`
	Message   string `xml:"Message"`
	RequestID string `xml:"RequestId"`
}

// wrappedErrorResponse represents the error response body with the
// error fields wrapped within an internal <Error/> element.
type wrappedErrorResponse struct {
	Code      string `xml:"Error>Code"`
	Message   string `xml:"Error>Message"`
	RequestID string `xml:"RequestId"`
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 000000000..f337803f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,51 @@
+package ratelimit
+
+import (
+ "sync"
+)
+
// TokenBucket provides a concurrency safe utility for adding and removing
// tokens from the available token bucket.
type TokenBucket struct {
	capacity    uint
	maxCapacity uint
	mu          sync.Mutex
}

// NewTokenBucket returns an initialized TokenBucket with the capacity
// specified.
func NewTokenBucket(i uint) *TokenBucket {
	return &TokenBucket{capacity: i, maxCapacity: i}
}

// Retrieve attempts to reduce the available tokens by the amount requested.
// When enough tokens are available, the bucket is debited and the remaining
// capacity is returned along with true. When the amount requested exceeds
// the available capacity, the bucket is left unchanged and the current
// capacity is returned along with false.
func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if amount > t.capacity {
		return t.capacity, false
	}

	t.capacity -= amount
	return t.capacity, true
}

// Refund returns the amount of tokens back to the available token bucket, up
// to the initial capacity.
func (t *TokenBucket) Refund(amount uint) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if refunded := t.capacity + amount; refunded > t.maxCapacity {
		t.capacity = t.maxCapacity
	} else {
		t.capacity = refunded
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 000000000..d7997f42a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,82 @@
+package ratelimit
+
+import (
+ "context"
+ "fmt"
+)
+
// rateToken records a quota withdrawal so its cost can later be refunded to
// the bucket it was taken from.
type rateToken struct {
	tokenCost uint
	bucket    *TokenBucket
}

// release refunds the token's cost back to its bucket. Always returns nil.
func (t rateToken) release() error {
	t.bucket.Refund(t.tokenCost)
	return nil
}

// TokenRateLimit provides a Token Bucket RateLimiter implementation
// that limits the overall number of retry attempts that can be made across
// operation invocations.
type TokenRateLimit struct {
	bucket *TokenBucket
}

// NewTokenRateLimit returns a TokenRateLimit whose bucket starts with the
// given number of tokens.
func NewTokenRateLimit(tokens uint) *TokenRateLimit {
	return &TokenRateLimit{
		bucket: NewTokenBucket(tokens),
	}
}

// isTimeoutError reports whether err is a timeout; currently a stub that
// always returns false.
func isTimeoutError(error) bool {
	return false
}
+
// canceledError reports a token request that was aborted because its context
// was canceled.
type canceledError struct {
	Err error
}

func (c canceledError) Error() string {
	return fmt.Sprintf("canceled, %v", c.Err)
}

// CanceledError confirms this error represents a cancellation.
func (c canceledError) CanceledError() bool { return true }

// Unwrap exposes the wrapped cause for errors.Is / errors.As.
func (c canceledError) Unwrap() error { return c.Err }
+
+// GetToken may cause a available pool of retry quota to be
+// decremented. Will return an error if the decremented value can not be
+// reduced from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+ select {
+ case <-ctx.Done():
+ return nil, canceledError{Err: ctx.Err()}
+ default:
+ }
+ if avail, ok := l.bucket.Retrieve(cost); !ok {
+ return nil, QuotaExceededError{Available: avail, Requested: cost}
+ }
+
+ return rateToken{
+ tokenCost: cost,
+ bucket: l.bucket,
+ }.release, nil
+}
+
// AddTokens increments the token bucket by a fixed amount.
//
// Refunds are capped at the bucket's maximum capacity (see
// TokenBucket.Refund); the returned error is always nil.
func (l *TokenRateLimit) AddTokens(v uint) error {
	l.bucket.Refund(v)
	return nil
}
+
// QuotaExceededError provides the SDK error when the retries for a given
// token bucket have been exhausted.
type QuotaExceededError struct {
	// Available is the remaining quota at the time of the failed request.
	Available uint
	// Requested is the amount of quota the attempt asked for.
	Requested uint
}

// Error implements the error interface.
func (e QuotaExceededError) Error() string {
	const format = "retry quota exceeded, %d available, %d requested"
	return fmt.Sprintf(format, e.Available, e.Requested)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 000000000..d8d00e615
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+ "fmt"
+)
+
// TODO remove replace with smithy.CanceledError

// RequestCanceledError is the error that will be returned by an API request
// that was canceled. Requests given a Context may return this error when
// canceled.
type RequestCanceledError struct {
	Err error
}

// Error implements the error interface.
func (e *RequestCanceledError) Error() string {
	return fmt.Sprintf("request canceled, %v", e.Err)
}

// CanceledError returns true to satisfy interfaces checking for canceled errors.
func (*RequestCanceledError) CanceledError() bool { return true }

// Unwrap returns the underlying error, if there was one.
func (e *RequestCanceledError) Unwrap() error { return e.Err }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 000000000..42ced06e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// Retryer Interface and Implementations
+//
+// This package defines the Retryer interface, which is used to either implement custom
+// retry behavior or to extend the existing retry implementations provided by the SDK.
+// This package provides a single retry implementation: Standard.
+//
+// Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a requests subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+// - Retrying errors that implement the RetryableError method, and return true.
+// - Connection Errors
+// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+// - Connection Reset Errors.
+// - net.OpErr types that are dialing errors or are temporary.
+// - HTTP Status Codes: 500, 502, 503, and 504.
+// - API Error Codes
+// - RequestTimeout, RequestTimeoutException
+// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+// RequestThrottled, SlowDown, EC2ThrottledException
+// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+// - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request if the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry
+// with a different context value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example to modify the default retry attempts for the standard retryer:
+//
+// // configure the custom retryer
+// customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+// o.MaxAttempts = 5
+// })
+//
+// // create a service client with the retryer
+// s3.NewFromConfig(cfg, func(o *s3.Options) {
+// o.Retryer = customRetry
+// })
+//
+// Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
+// way. These are:
+//
+// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
+// in addition to those considered retryable by the provided retryer.
+//
+// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
+// a retryer implementation.
+//
+// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
+// request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+// you can use this method to easily create custom back off policies to be used with the
+// standard retryer.
+//
+// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be retried.
+//
+// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be considered a timeout.
+package retry
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 000000000..3e432eefe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
// MaxAttemptsError provides the error when the maximum number of attempts have
// been exceeded.
type MaxAttemptsError struct {
	// Attempt is the attempt count at which retries were exhausted.
	Attempt int
	// Err is the error from the final attempt.
	Err error
}

// Error implements the error interface.
func (e *MaxAttemptsError) Error() string {
	return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
}

// Unwrap returns the nested error causing the max attempts error. Provides the
// implementation for errors.Is and errors.As to unwrap nested errors.
func (e *MaxAttemptsError) Unwrap() error { return e.Err }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
new file mode 100644
index 000000000..c266996de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
@@ -0,0 +1,49 @@
+package retry
+
+import (
+ "math"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
// ExponentialJitterBackoff provides backoff delays with jitter based on the
// number of attempts.
type ExponentialJitterBackoff struct {
	maxBackoff time.Duration
	// precomputed number of attempts needed to reach max backoff.
	maxBackoffAttempts float64

	// randFloat64 returns a random float in [0.0, 1.0); stored as a field
	// so it can be substituted in tests.
	randFloat64 func() (float64, error)
}

// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
// for the max backoff.
func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
	return &ExponentialJitterBackoff{
		maxBackoff: maxBackoff,
		// log2(maxBackoff in seconds) is the attempt number at which
		// 2^attempt seconds reaches maxBackoff.
		maxBackoffAttempts: math.Log2(
			float64(maxBackoff) / float64(time.Second)),
		randFloat64: rand.CryptoRandFloat64,
	}
}

// BackoffDelay returns the duration to wait before the next attempt should be
// made. Returns an error if unable to get a duration.
func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
	// Beyond the precomputed threshold the jittered delay could exceed the
	// cap, so return the max backoff directly.
	if attempt > int(j.maxBackoffAttempts) {
		return j.maxBackoff, nil
	}

	b, err := j.randFloat64()
	if err != nil {
		return 0, err
	}

	// [0.0, 1.0) * 2 ^ attempts
	ri := int64(1 << uint64(attempt))
	delaySeconds := b * float64(ri)

	return timeconv.FloatSecondsDur(delaySeconds), nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
new file mode 100644
index 000000000..7a3f18301
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+)
+
// attemptResultsKey is a metadata accessor key to retrieve metadata
// for all request attempts.
type attemptResultsKey struct {
}

// GetAttemptResults retrieves attempt results from middleware metadata.
func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
	m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
	return m, ok
}

// AttemptResults contains the metadata returned by all request attempts.
type AttemptResults struct {

	// Results is a slice of attempt results, stored in the order the
	// request attempts were made.
	Results []AttemptResult
}

// AttemptResult represents the attempt result returned by a single request attempt.
type AttemptResult struct {

	// Err is the error, if any, received for the request attempt.
	Err error

	// Retryable denotes if the request may be retried. This states if an
	// error is considered retryable.
	Retryable bool

	// Retried indicates if this request was retried.
	Retried bool

	// ResponseMetadata is any existing metadata passed via the response middlewares.
	ResponseMetadata middleware.Metadata
}

// addAttemptResults adds attempt results to middleware metadata so callers
// can recover the attempt history via GetAttemptResults.
func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
	metadata.Set(attemptResultsKey{}, v)
}

// GetRawResponse returns the raw response recorded for the attempt result.
func (a AttemptResult) GetRawResponse() interface{} {
	return awsmiddle.GetRawResponse(a.ResponseMetadata)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
new file mode 100644
index 000000000..8d703ed21
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -0,0 +1,273 @@
+package retry
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithymiddle "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+)
+
// RequestCloner is a function that can take an input request type and clone the request
// for use in a subsequent retry attempt.
type RequestCloner func(interface{}) interface{}

// retryMetadata carries per-attempt state through the middleware stack via
// stack-scoped context values (see getRetryMetadata / setRetryMetadata).
type retryMetadata struct {
	AttemptNum       int
	AttemptTime      time.Time
	MaxAttempts      int
	AttemptClockSkew time.Duration
}

// Attempt is a Smithy FinalizeMiddleware that handles retry attempts using the provided
// Retryer implementation.
type Attempt struct {
	// Enable the logging of retry attempts performed by the SDK.
	// This will include logging retry attempts, unretryable errors, and when max attempts are reached.
	LogAttempts bool

	retryer       aws.Retryer
	requestCloner RequestCloner
}

// NewAttemptMiddleware returns a new Attempt retry middleware, applying any
// optional functional options to the constructed middleware.
func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
	m := &Attempt{retryer: retryer, requestCloner: requestCloner}
	for _, fn := range optFns {
		fn(m)
	}
	return m
}

// ID returns the middleware identifier.
func (r *Attempt) ID() string {
	return "Retry"
}

// logf emits the formatted message only when attempt logging is enabled.
func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
	if !r.LogAttempts {
		return
	}
	logger.Logf(classification, format, v...)
}
+
// HandleFinalize utilizes the provided Retryer implementation to attempt
// retries over the next handler. It loops until an attempt reports it should
// not be retried, recording every attempt's result in the returned metadata.
func (r Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
) {
	var attemptNum int
	var attemptClockSkew time.Duration
	var attemptResults AttemptResults

	maxAttempts := r.retryer.MaxAttempts()

	for {
		attemptNum++
		// Clone the request so a retry never reuses a partially consumed one.
		attemptInput := in
		attemptInput.Request = r.requestCloner(attemptInput.Request)

		// Expose per-attempt state (attempt number, max attempts, clock
		// skew) to downstream middleware via stack-scoped context values.
		attemptCtx := setRetryMetadata(ctx, retryMetadata{
			AttemptNum:       attemptNum,
			AttemptTime:      sdk.NowTime().UTC(),
			MaxAttempts:      maxAttempts,
			AttemptClockSkew: attemptClockSkew,
		})

		var attemptResult AttemptResult

		out, attemptResult, err = r.handleAttempt(attemptCtx, attemptInput, next)

		// Carry the attempt's observed clock skew into the next attempt's
		// metadata; zero when the response did not report one.
		var ok bool
		attemptClockSkew, ok = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
		if !ok {
			attemptClockSkew = 0
		}

		shouldRetry := attemptResult.Retried

		// add attempt metadata to list of all attempt metadata
		attemptResults.Results = append(attemptResults.Results, attemptResult)

		if !shouldRetry {
			break
		}
	}

	// Record the full attempt history in the returned metadata.
	addAttemptResults(&metadata, attemptResults)
	return out, metadata, err
}
+
// handleAttempt handles an individual request attempt. It invokes the next
// handler once, classifies the outcome, and — when a retry is warranted —
// acquires a retry token and sleeps for the backoff delay before returning
// with attemptResult.Retried set so the caller loops again.
func (r Attempt) handleAttempt(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, err error,
) {
	// Record the final error, however it was produced, on the attempt result.
	defer func() {
		attemptResult.Err = err
	}()

	// relRetryToken releases the token obtained from the retryer once the
	// attempt's outcome (err) is known.
	relRetryToken := r.retryer.GetInitialToken()
	logger := smithymiddle.GetLogger(ctx)
	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)

	retryMetadata, _ := getRetryMetadata(ctx)
	attemptNum := retryMetadata.AttemptNum
	maxAttempts := retryMetadata.MaxAttempts

	// On a retry, rewind the request body (when the request supports it) so
	// the full payload is re-sent.
	if attemptNum > 1 {
		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
				err = fmt.Errorf("failed to rewind transport stream for retry, %w", rewindErr)
				return out, attemptResult, err
			}
		}

		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", service, operation, attemptNum)
	}

	var metadata smithymiddle.Metadata
	out, metadata, err = next.HandleFinalize(ctx, in)
	attemptResult.ResponseMetadata = metadata

	// NOTE(review): releaseError itself is discarded here; only the original
	// request error is wrapped into the returned message — confirm intentional.
	if releaseError := relRetryToken(err); releaseError != nil && err != nil {
		err = fmt.Errorf("failed to release token after request error, %w", err)
		return out, attemptResult, err
	}

	if err == nil {
		return out, attemptResult, err
	}

	retryable := r.retryer.IsErrorRetryable(err)
	if !retryable {
		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
		return out, attemptResult, err
	}

	// set retryable to true
	attemptResult.Retryable = true

	if maxAttempts > 0 && attemptNum >= maxAttempts {
		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
		err = &MaxAttemptsError{
			Attempt: attemptNum,
			Err:     err,
		}
		return out, attemptResult, err
	}

	// Acquire a retry token before scheduling another attempt; failure here
	// (e.g. retry quota exhausted) aborts the retry.
	relRetryToken, reqErr := r.retryer.GetRetryToken(ctx, err)
	if reqErr != nil {
		return out, attemptResult, reqErr
	}

	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
	if reqErr != nil {
		return out, attemptResult, reqErr
	}

	// Sleep for the backoff delay, honoring context cancellation.
	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
		err = &aws.RequestCanceledError{Err: reqErr}
		return out, attemptResult, err
	}

	// Signal the caller (HandleFinalize loop) to attempt again.
	attemptResult.Retried = true

	return out, attemptResult, err
}
+
// MetricsHeader attaches the SDK request metric header for retries to the
// transport request.
type MetricsHeader struct{}

// ID returns the middleware identifier.
func (r *MetricsHeader) ID() string {
	return "RetryMetricsHeader"
}

// HandleFinalize attaches the sdk request metric header (Amz-Sdk-Request)
// to the transport layer, built from the current attempt metadata.
func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
) {
	retryMetadata, _ := getRetryMetadata(ctx)

	const retryMetricHeader = "Amz-Sdk-Request"
	var parts []string

	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
	if retryMetadata.MaxAttempts != 0 {
		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
	}

	var ttl time.Time
	if deadline, ok := ctx.Deadline(); ok {
		ttl = deadline
	}

	// Only append the TTL if it can be determined.
	if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 {
		const unixTimeFormat = "20060102T150405Z"
		// Adjust the local deadline by the observed clock skew.
		ttl = ttl.Add(retryMetadata.AttemptClockSkew)
		parts = append(parts, "ttl="+ttl.Format(unixTimeFormat))
	}

	switch req := in.Request.(type) {
	case *http.Request:
		// Truncate-and-append ([:0]) replaces any existing values for the
		// header while reusing the backing array.
		req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; "))
	default:
		return out, metadata, fmt.Errorf("unknown transport type %T", req)
	}

	return next.HandleFinalize(ctx, in)
}

// retryMetadataKey is the stack-value key under which retryMetadata is stored.
type retryMetadataKey struct{}

// getRetryMetadata retrieves retryMetadata from the context and a bool
// indicating if it was set.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
	metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
	return metadata, ok
}

// setRetryMetadata sets the retryMetadata on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
	return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata)
}
+
+// AddRetryMiddlewaresOptions is the set of options that can be passed to AddRetryMiddlewares for configuring retry
+// associated middleware.
+type AddRetryMiddlewaresOptions struct {
+ Retryer aws.Retryer
+
+ // Enable the logging of retry attempts performed by the SDK.
+ // This will include logging retry attempts, unretryable errors, and when max attempts are reached.
+ LogRetryAttempts bool
+}
+
+// AddRetryMiddlewares adds retry middleware to operation middleware stack
+func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error {
+ attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) {
+ middleware.LogAttempts = options.LogRetryAttempts
+ })
+
+ if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
new file mode 100644
index 000000000..ad81b8c98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
@@ -0,0 +1,72 @@
+package retry
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// AddWithErrorCodes returns a Retryer with additional error codes considered
+// for determining if the error should be retried.
+func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer {
+ retryable := &RetryableErrorCode{
+ Codes: map[string]struct{}{},
+ }
+ for _, c := range codes {
+ retryable.Codes[c] = struct{}{}
+ }
+
+ return &withIsErrorRetryable{
+ Retryer: r,
+ Retryable: retryable,
+ }
+}
+
+type withIsErrorRetryable struct {
+ aws.Retryer
+ Retryable IsErrorRetryable
+}
+
+func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool {
+ if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary {
+ return v.Bool()
+ }
+ return r.Retryer.IsErrorRetryable(err)
+}
+
+// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value
+// specified.
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+ return &withMaxAttempts{
+ Retryer: r,
+ Max: max,
+ }
+}
+
+type withMaxAttempts struct {
+ aws.Retryer
+ Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+ return w.Max
+}
+
// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer,
// overriding the RetryDelay behavior to use an exponential jitter backoff
// built from the provided delay.
//
// NOTE(review): the upstream comment described delay as an "alternate
// minimum initial backoff delay", but the value is passed to
// NewExponentialJitterBackoff the same way StandardOptions.MaxBackoff is in
// NewStandard — confirm the intended semantics against that constructor.
func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
	return &withMaxBackoffDelay{
		Retryer: r,
		backoff: NewExponentialJitterBackoff(delay),
	}
}

// withMaxBackoffDelay wraps a Retryer, overriding only its RetryDelay.
type withMaxBackoffDelay struct {
	aws.Retryer
	backoff *ExponentialJitterBackoff
}

// RetryDelay delegates to the wrapped exponential jitter backoff.
func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
	return r.backoff.BackoffDelay(attempt, err)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 000000000..c695e6fe5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,186 @@
+package retry
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// a error as the result of an operation is retryable.
+type IsErrorRetryable interface {
+ IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine of the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+ for _, re := range r {
+ if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+ return v
+ }
+ }
+ return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+ return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ RetryableError() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was an request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ CanceledError() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ if v.CanceledError() {
+ return aws.FalseTernary
+ }
+ return aws.UnknownTernary
+}
+
// RetryableConnectionError determines if the underlying error is an HTTP
// connection error and returns if it should be retried.
//
// Includes errors such as connection reset, connection refused, net dial,
// temporary, and timeout errors.
type RetryableConnectionError struct{}

// IsErrorRetryable returns if the error is caused by an HTTP connection
// error, and should be retried. The cases below are order-sensitive: the
// more specific classifications run before the generic temporary/timeout
// fallbacks.
func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
	if err == nil {
		return aws.UnknownTernary
	}
	var retryable bool

	var conErr interface{ ConnectionError() bool }
	var tempErr interface{ Temporary() bool }
	var timeoutErr interface{ Timeout() bool }
	var urlErr *url.Error
	var netOpErr *net.OpError

	switch {
	case errors.As(err, &conErr) && conErr.ConnectionError():
		retryable = true

	// Last-resort string match for errors that expose no typed cause.
	case strings.Contains(err.Error(), "connection reset"):
		retryable = true

	case errors.As(err, &urlErr):
		// Refused connections should be retried as the service may not yet be
		// running on the port. Go TCP dial considers refused connections as
		// not temporary.
		if strings.Contains(urlErr.Error(), "connection refused") {
			retryable = true
		} else {
			// Recurse on the wrapped error so the remaining checks apply.
			return r.IsErrorRetryable(errors.Unwrap(urlErr))
		}

	case errors.As(err, &netOpErr):
		// Network dial, or temporary network errors are always retryable.
		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
			retryable = true
		} else {
			// Recurse on the wrapped error so the remaining checks apply.
			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
		}

	case errors.As(err, &tempErr) && tempErr.Temporary():
		// Fallback to the generic temporary check, with temporary errors
		// retryable.
		retryable = true

	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
		// Fallback to the generic timeout check, with timeout errors
		// retryable.
		retryable = true

	default:
		return aws.UnknownTernary
	}

	return aws.BoolTernary(retryable)

}
+
+// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status
+// codes.
+type RetryableHTTPStatusCode struct {
+ Codes map[int]struct{}
+}
+
+// IsErrorRetryable return if the passed in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ HTTPStatusCode() int }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ _, ok := r.Codes[v.HTTPStatusCode()]
+ if !ok {
+ return aws.UnknownTernary
+ }
+
+ return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+ Codes map[string]struct{}
+}
+
+// IsErrorRetryable return if the error is retryable based on the error codes.
+// Returns unknown if the error doesn't have a code or it is unknown.
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ ErrorCode() string }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ _, ok := r.Codes[v.ErrorCode()]
+ if !ok {
+ return aws.UnknownTernary
+ }
+
+ return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
new file mode 100644
index 000000000..be2f81469
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -0,0 +1,210 @@
+package retry
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay to before
+// another request attempt, that previously failed.
+type BackoffDelayer interface {
+ BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay before attempt to retry a request.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ return fn(attempt, err)
+}
+
const (
	// DefaultMaxAttempts is the maximum number of attempts made for an API
	// request before the SDK gives up.
	DefaultMaxAttempts int = 3

	// DefaultMaxBackoff is the maximum back off delay between attempts.
	DefaultMaxBackoff time.Duration = 20 * time.Second
)

// Default retry token quota values used by the standard retryer's
// client-side rate limiting (see Standard.GetRetryToken/GetInitialToken).
const (
	// DefaultRetryRateTokens is the token quota passed to
	// ratelimit.NewTokenRateLimit by NewStandard.
	DefaultRetryRateTokens uint = 500

	// DefaultRetryCost is the token cost deducted for a regular retry.
	DefaultRetryCost uint = 5

	// DefaultRetryTimeoutCost is the token cost deducted when retrying a
	// timeout error.
	DefaultRetryTimeoutCost uint = 10

	// DefaultNoRetryIncrement is the number of tokens returned to the pool
	// when a request succeeds without retrying.
	DefaultNoRetryIncrement uint = 1
)

// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the
// SDK should consider as retryable errors.
var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
	500: {},
	502: {},
	503: {},
	504: {},
}
+
// DefaultRetryableErrorCodes provides the set of API error codes that should
// be retried.
var DefaultRetryableErrorCodes = map[string]struct{}{
	"RequestTimeout":          {},
	"RequestTimeoutException": {},

	// Throttled status codes
	"Throttling":                             {},
	"ThrottlingException":                    {},
	"ThrottledException":                     {},
	"RequestThrottledException":              {},
	"TooManyRequestsException":               {},
	"ProvisionedThroughputExceededException": {},
	"TransactionInProgressException":         {},
	"RequestLimitExceeded":                   {},
	"BandwidthLimitExceeded":                 {},
	"LimitExceededException":                 {},
	"RequestThrottled":                       {},
	"SlowDown":                               {},
	"PriorRequestNotComplete":                {},
	"EC2ThrottledException":                  {},
}

// DefaultRetryables provides the set of retryable checks that are used by
// default. Order matters: IsErrorRetryables returns the first non-unknown
// answer, so cancellation is checked first, then the error's own Retryable
// hint, connection errors, HTTP status codes, and finally API error codes.
var DefaultRetryables = []IsErrorRetryable{
	NoRetryCanceledError{},
	RetryableError{},
	RetryableConnectionError{},
	RetryableHTTPStatusCode{
		Codes: DefaultRetryableHTTPStatusCodes,
	},
	RetryableErrorCode{
		Codes: DefaultRetryableErrorCodes,
	},
}
+
// StandardOptions provides the functional options for configuring the standard
// retryable, and delay behavior.
type StandardOptions struct {
	// MaxAttempts is the maximum number of attempts per request.
	MaxAttempts int
	// MaxBackoff caps the delay produced by the default backoff delayer.
	MaxBackoff time.Duration
	// Backoff computes the delay between attempts; when nil, NewStandard
	// substitutes NewExponentialJitterBackoff(MaxBackoff).
	Backoff BackoffDelayer

	// Retryables are the checks deciding whether an error can be retried.
	Retryables []IsErrorRetryable
	// Timeouts are the checks deciding whether an error counts as a timeout
	// (timeouts are charged RetryTimeoutCost instead of RetryCost).
	Timeouts []IsErrorTimeout

	// RateLimiter is the client-side token pool consulted before retries.
	RateLimiter RateLimiter
	// RetryCost is the token cost of a regular retry.
	RetryCost uint
	// RetryTimeoutCost is the token cost of retrying a timeout error.
	RetryTimeoutCost uint
	// NoRetryIncrement is the number of tokens added back to the pool when a
	// request succeeds without retrying.
	NoRetryIncrement uint
}

// RateLimiter provides the interface for limiting the rate of request retries
// allowed by the retrier.
type RateLimiter interface {
	// GetToken deducts cost from the pool, returning a function that can
	// release the token back, or an error if the deduction fails.
	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
	// AddTokens returns tokens to the pool.
	AddTokens(uint) error
}

// nopTokenRelease is a token release function that does nothing.
func nopTokenRelease(error) error { return nil }
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine of the failed request should be retried, and
+// what retry delay should be used.
+type Standard struct {
+ options StandardOptions
+
+ timeout IsErrorTimeout
+ retryable IsErrorRetryable
+ backoff BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+ o := StandardOptions{
+ MaxAttempts: DefaultMaxAttempts,
+ MaxBackoff: DefaultMaxBackoff,
+ Retryables: DefaultRetryables,
+
+ RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+ RetryCost: DefaultRetryCost,
+ RetryTimeoutCost: DefaultRetryTimeoutCost,
+ NoRetryIncrement: DefaultNoRetryIncrement,
+ }
+ for _, fn := range fnOpts {
+ fn(&o)
+ }
+
+ backoff := o.Backoff
+ if backoff == nil {
+ backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+ }
+
+ rs := make([]IsErrorRetryable, len(o.Retryables))
+ copy(rs, o.Retryables)
+
+ ts := make([]IsErrorTimeout, len(o.Timeouts))
+ copy(ts, o.Timeouts)
+
+ return &Standard{
+ options: o,
+ backoff: backoff,
+ retryable: IsErrorRetryables(rs),
+ timeout: IsErrorTimeouts(ts),
+ }
+}
+
// MaxAttempts returns the maximum number of attempts that can be made for a
// request before failing.
func (s *Standard) MaxAttempts() int {
	return s.options.MaxAttempts
}

// IsErrorRetryable returns if the error can be retried or not. Does not
// consider the number of attempts made.
func (s *Standard) IsErrorRetryable(err error) bool {
	return s.retryable.IsErrorRetryable(err).Bool()
}

// RetryDelay returns the delay to use before another request attempt is made.
func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
	return s.backoff.BackoffDelay(attempt, err)
}

// GetInitialToken returns the initial request token that can increment the
// retry token pool if the request is successful.
func (s *Standard) GetInitialToken() func(error) error {
	return releaseToken(s.incrementTokens).release
}

// incrementTokens adds NoRetryIncrement tokens back to the rate limiter pool.
func (s *Standard) incrementTokens() error {
	return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
}

// GetRetryToken attempts to deduct the retry cost from the retry token pool.
// Timeout errors are charged RetryTimeoutCost; all other errors RetryCost.
// Returning the token release function, or error.
func (s *Standard) GetRetryToken(ctx context.Context, err error) (func(error) error, error) {
	cost := s.options.RetryCost
	if s.timeout.IsErrorTimeout(err).Bool() {
		cost = s.options.RetryTimeoutCost
	}

	fn, err := s.options.RateLimiter.GetToken(ctx, cost)
	if err != nil {
		return nil, err
	}

	return releaseToken(fn).release, nil
}
+
// releaseToken adapts a no-argument token release function to the
// func(error) error shape the Retryer interface expects.
type releaseToken func() error

// release invokes the underlying release function only when the attempt
// succeeded (err is nil); failed attempts do not return their token.
func (f releaseToken) release(err error) error {
	if err == nil {
		return f()
	}
	return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 000000000..3d47870d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+ "errors"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// a error matches.
+type IsErrorTimeout interface {
+ IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine of the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+ for _, t := range ts {
+ if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+ return v
+ }
+ }
+ return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns if the error is retryable.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+ return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on type with the Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+ var v interface{ Timeout() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 000000000..0489508ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,62 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
// Retryer is an interface to determine if a given error from a
// request should be retried, and if so what backoff delay to apply. The
// default implementation used by most services is the retry package's Standard
// type. Which contains basic retry logic using exponential backoff.
type Retryer interface {
	// IsErrorRetryable returns if the failed request is retryable. This check
	// should determine if the error can be retried, or if the error is
	// terminal.
	IsErrorRetryable(error) bool

	// MaxAttempts returns the maximum number of attempts that can be made for
	// a request before failing. A value of 0 implies that the request should
	// be retried until it succeeds if the errors are retryable.
	MaxAttempts() int

	// RetryDelay returns the delay that should be used before retrying the
	// request. Will return error if the delay could not be determined.
	RetryDelay(attempt int, opErr error) (time.Duration, error)

	// GetRetryToken attempts to deduct the retry cost from the retry token pool.
	// Returning the token release function, or error.
	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)

	// GetInitialToken returns the initial request token that can increment the
	// retry token pool if the request is successful.
	GetInitialToken() (releaseToken func(error) error)
}
+
+// NopRetryer provides a RequestRetryDecider implementation that will flag
+// all attempt errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original request attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Will always return error.
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) {
+ return 0, fmt.Errorf("not retrying any request errors")
+}
+
+// GetRetryToken returns a stub function that does nothing.
+func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) {
+ return nopReleaseToken, nil
+}
+
+// GetInitialToken returns a stub function that does nothing.
+func (NopRetryer) GetInitialToken() func(error) error {
+ return nopReleaseToken
+}
+
+func nopReleaseToken(error) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
new file mode 100644
index 000000000..cbf22f1d0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
@@ -0,0 +1,115 @@
+package v4
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
// lookupKey builds the cache key for a derived signing key, in the form
// "<region>/<service>".
func lookupKey(service, region string) string {
	var b strings.Builder
	b.Grow(len(region) + 1 + len(service))
	b.WriteString(region)
	b.WriteByte('/')
	b.WriteString(service)
	return b.String()
}
+
// derivedKey is a cache entry pairing a derived SigV4 signing key with the
// access key and signing date it was derived from.
type derivedKey struct {
	AccessKey  string
	Date       time.Time
	Credential []byte
}

// derivedKeyCache memoizes derived signing keys per service/region, guarded
// by an RWMutex so concurrent signers can share entries.
type derivedKeyCache struct {
	values map[string]derivedKey
	mutex  sync.RWMutex
}

// newDerivedKeyCache returns an empty, ready-to-use cache.
func newDerivedKeyCache() derivedKeyCache {
	return derivedKeyCache{
		values: make(map[string]derivedKey),
	}
}

// Get returns the derived signing key for the credentials/service/region and
// signing time, deriving and caching it when absent. Uses double-checked
// locking: a read-locked lookup first, then a write-locked recheck before
// deriving.
func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
	key := lookupKey(service, region)
	s.mutex.RLock()
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.RUnlock()
		return cred
	}
	s.mutex.RUnlock()

	s.mutex.Lock()
	// Recheck under the write lock; another goroutine may have derived the
	// key while we were waiting.
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.Unlock()
		return cred
	}
	cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
	entry := derivedKey{
		AccessKey:  credentials.AccessKeyID,
		Date:       signingTime.Time,
		Credential: cred,
	}
	s.values[key] = entry
	s.mutex.Unlock()

	return cred
}

// get returns the cached key only when the entry was derived for the same
// access key and the same calendar day. Callers must hold the mutex.
func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
	cacheEntry, ok := s.retrieveFromCache(key)
	if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
		return cacheEntry.Credential, true
	}
	return nil, false
}

// retrieveFromCache looks up the raw cache entry. Callers must hold the mutex.
func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
	if v, ok := s.values[key]; ok {
		return v, true
	}
	return derivedKey{}, false
}
+
+// SigningKeyDeriver derives a signing key from a set of credentials
+type SigningKeyDeriver struct {
+ cache derivedKeyCache
+}
+
+// NewSigningKeyDeriver returns a new SigningKeyDeriver
+func NewSigningKeyDeriver() *SigningKeyDeriver {
+ return &SigningKeyDeriver{
+ cache: newDerivedKeyCache(),
+ }
+}
+
+// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
+func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
+ return k.cache.Get(credential, service, region, signingTime)
+}
+
+func deriveKey(secret, service, region string, t SigningTime) []byte {
+ hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
+ hmacRegion := HMACSHA256(hmacDate, []byte(region))
+ hmacService := HMACSHA256(hmacRegion, []byte(service))
+ return HMACSHA256(hmacService, []byte("aws4_request"))
+}
+
+func isSameDay(x, y time.Time) bool {
+ xYear, xMonth, xDay := x.Date()
+ yYear, yMonth, yDay := y.Date()
+
+ if xYear != yYear {
+ return false
+ }
+
+ if xMonth != yMonth {
+ return false
+ }
+
+ return xDay == yDay
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
new file mode 100644
index 000000000..89a76e2ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
@@ -0,0 +1,36 @@
+package v4
+
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shortened (date-only) time format used in the credential scope
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the header carrying the SHA256 of the request body
	ContentSHAKey = "X-Amz-Content-Sha256"
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 000000000..5e5953c73
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
// Rules houses a set of Rule needed for validation of a string value.
type Rules []Rule

// Rule checks whether or not a value adheres to that Rule.
type Rule interface {
	IsValid(value string) bool
}

// IsValid reports whether any rule in the set accepts the value; nested
// rules are supported through the Rule interface.
func (r Rules) IsValid(value string) bool {
	for _, rule := range r {
		if rule.IsValid(value) {
			return true
		}
	}
	return false
}

// MapRule is a generic Rule implementing set membership backed by a map.
type MapRule map[string]struct{}

// IsValid reports whether the value exists in the map.
func (m MapRule) IsValid(value string) bool {
	_, ok := m[value]
	return ok
}

// Whitelist is a generic Rule for whitelisting: it accepts exactly the
// values its inner Rule accepts.
type Whitelist struct {
	Rule
}

// IsValid reports whether the value is within the Whitelist.
func (w Whitelist) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}

// Blacklist is a generic Rule for blacklisting: it accepts exactly the
// values its inner Rule rejects.
type Blacklist struct {
	Rule
}

// IsValid reports whether the value is NOT matched by the inner Rule.
func (b Blacklist) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}
+
// Patterns is a list of strings to match against.
type Patterns []string

// IsValid reports whether the value starts (case-insensitively) with any of
// the patterns in the list.
func (p Patterns) IsValid(value string) bool {
	for _, pattern := range p {
		if sdkstrings.HasPrefixFold(value, pattern) {
			return true
		}
	}
	return false
}
+
+// InclusiveRules rules allow for rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 000000000..b62d985cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,67 @@
+package v4
+
// IgnoredHeaders is a list of headers that are ignored during signing.
var IgnoredHeaders = Rules{
	Blacklist{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		},
	},
}

// RequiredSignedHeaders is a whitelist for Build canonical headers: headers
// in this set (plus any X-Amz-Meta-* header) must be part of the signed
// canonical headers when present on a request.
var RequiredSignedHeaders = Rules{
	Whitelist{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Grant-Full-control":                    struct{}{},
			"X-Amz-Grant-Read":                            struct{}{},
			"X-Amz-Grant-Read-Acp":                        struct{}{},
			"X-Amz-Grant-Write":                           struct{}{},
			"X-Amz-Grant-Write-Acp":                       struct{}{},
			"X-Amz-Metadata-Directive":                    struct{}{},
			"X-Amz-Mfa":                                   struct{}{},
			"X-Amz-Request-Payer":                         struct{}{},
			"X-Amz-Server-Side-Encryption":                struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Storage-Class":                             struct{}{},
			"X-Amz-Website-Redirect-Location":                 struct{}{},
			"X-Amz-Content-Sha256":                            struct{}{},
			"X-Amz-Tagging":                                   struct{}{},
		},
	},
	Patterns{"X-Amz-Meta-"},
}

// AllowedQueryHoisting matches headers that may be hoisted into the query
// string: any header with the "X-Amz-" prefix that is NOT one of the
// RequiredSignedHeaders above.
var AllowedQueryHoisting = InclusiveRules{
	Blacklist{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
new file mode 100644
index 000000000..e7fa7a1b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
// HMACSHA256 computes the HMAC-SHA256 of data using the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data) // hash.Hash.Write is documented to never return an error
	return mac.Sum(nil)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
new file mode 100644
index 000000000..bf93659a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
new file mode 100644
index 000000000..1de06a765
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
+type SigningTime struct {
+ time.Time
+ timeFormat string
+ shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+ return SigningTime{
+ Time: t,
+ }
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+ return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides a time formatted of 20060102.
+func (m *SigningTime) ShortTimeFormat() string {
+ return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+ if len(*target) > 0 {
+ return *target
+ }
+ v := m.Time.Format(format)
+ *target = v
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
new file mode 100644
index 000000000..741019b5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
@@ -0,0 +1,64 @@
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+const doubleSpace = " "
+
+// StripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func StripExcessSpaces(str string) string {
+ var j, k, l, m, spaces int
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ return str
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
new file mode 100644
index 000000000..ffa297668
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -0,0 +1,292 @@
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/middleware"
+ smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+const computePayloadHashMiddlewareID = "ComputePayloadHash"
+
+// HashComputationError indicates an error occurred while computing the signing hash
+type HashComputationError struct {
+ Err error
+}
+
+// Error is the error message
+func (e *HashComputationError) Error() string {
+ return fmt.Sprintf("failed to compute payload hash: %v", e.Err)
+}
+
+// Unwrap returns the underlying error if one is set
+func (e *HashComputationError) Unwrap() error {
+ return e.Err
+}
+
+// SigningError indicates an error condition occurred while performing SigV4 signing
+type SigningError struct {
+ Err error
+}
+
+func (e *SigningError) Error() string {
+ return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+ return e.Err
+}
+
+// unsignedPayload sets the SigV4 request payload hash to unsigned.
+//
+// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
+// stored in the context. (e.g. application pre-computed SHA256 before making
+// API call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header, if that
+// header is serialized a middleware must translate it into the context.
+type unsignedPayload struct{}
+
+// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation
+// middleware stack
+func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&unsignedPayload{}, middleware.After)
+}
+
+// ID returns the unsignedPayload identifier
+func (m *unsignedPayload) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleBuild sets the payload hash to be an unsigned payload
+func (m *unsignedPayload) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ // This should not compute the content SHA256 if the value is already
+ // known. (e.g. application pre-computed SHA256 before making API call).
+ // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if
+ // that header is provided a middleware must translate it into the context.
+ contentSHA := GetPayloadHash(ctx)
+ if len(contentSHA) == 0 {
+ contentSHA = v4Internal.UnsignedPayload
+ }
+
+ ctx = SetPayloadHash(ctx, contentSHA)
+ return next.HandleBuild(ctx, in)
+}
+
+// computePayloadSHA256 computes SHA256 payload hash to sign.
+//
+// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
+// stored in the context. (e.g. application pre-computed SHA256 before making
+// API call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header, if that
+// header is serialized a middleware must translate it into the context.
+type computePayloadSHA256 struct{}
+
+// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the
+// operation middleware stack
+func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&computePayloadSHA256{}, middleware.After)
+}
+
+// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the
+// operation middleware stack
+func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+ _, err := stack.Build.Remove(computePayloadHashMiddlewareID)
+ return err
+}
+
+// ID is the middleware name
+func (m *computePayloadSHA256) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleBuild compute the payload hash for the request payload
+func (m *computePayloadSHA256) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+ }
+ }
+
+ // This should not compute the content SHA256 if the value is already
+ // known. (e.g. application pre-computed SHA256 before making API call)
+ // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if
+ // that header is provided a middleware must translate it into the context.
+ if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 {
+ return next.HandleBuild(ctx, in)
+ }
+
+ hash := sha256.New()
+ if stream := req.GetStream(); stream != nil {
+ _, err = io.Copy(hash, stream)
+ if err != nil {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("failed to compute payload hash, %w", err),
+ }
+ }
+
+ if err := req.RewindStream(); err != nil {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("failed to seek body to start, %w", err),
+ }
+ }
+ }
+
+ ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil)))
+
+ return next.HandleBuild(ctx, in)
+}
+
+// contentSHA256Header sets the X-Amz-Content-Sha256 header value to
+// the Payload hash stored in the context.
+type contentSHA256Header struct{}
+
+// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the
+// operation middleware stack
+func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
+}
+
+// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware
+// from the operation middleware stack
+func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID())
+ return err
+}
+
+// ID returns the ContentSHA256HeaderMiddleware identifier
+func (m *contentSHA256Header) ID() string {
+ return "SigV4ContentSHA256Header"
+}
+
+// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash
+// stored in the context.
+func (m *contentSHA256Header) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+ }
+
+ req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx))
+
+ return next.HandleBuild(ctx, in)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
+type SignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ Signer HTTPSigner
+ LogSigning bool
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing
+type SignHTTPRequestMiddleware struct {
+ credentialsProvider aws.CredentialsProvider
+ signer HTTPSigner
+ logSigning bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+ return &SignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ signer: options.Signer,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID is the SignHTTPRequestMiddleware identifier
+func (s *SignHTTPRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if !haveCredentialProvider(s.credentialsProvider) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+ }
+
+ signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+ }
+
+ credentials, err := s.credentialsProvider.Retrieve(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+ }
+
+ err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(),
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ })
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func haveCredentialProvider(p aws.CredentialsProvider) bool {
+ if p == nil {
+ return false
+ }
+ switch p.(type) {
+ case aws.AnonymousCredentials,
+ *aws.AnonymousCredentials:
+ return false
+ }
+
+ return true
+}
+
+type payloadHashKey struct{}
+
+// GetPayloadHash retrieves the payload hash to use for signing
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPayloadHash(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string)
+ return v
+}
+
+// SetPayloadHash sets the payload hash to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPayloadHash(ctx context.Context, hash string) context.Context {
+ return middleware.WithStackValue(ctx, payloadHashKey{}, hash)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
new file mode 100644
index 000000000..e1a066512
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
@@ -0,0 +1,127 @@
+package v4
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/middleware"
+ smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4 signer that can create a
+// presigned URL for an HTTP request.
+type HTTPPresigner interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignedHTTPRequest provides the URL and signed headers that are included
+// in the presigned URL.
+type PresignedHTTPRequest struct {
+ URL string
+ Method string
+ SignedHeader http.Header
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ Presigner HTTPPresigner
+ LogSigning bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+ credentialsProvider aws.CredentialsProvider
+ presigner HTTPPresigner
+ logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+ return &PresignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ presigner: options.Presigner,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+ }
+ }
+
+ httpReq := req.Build(ctx)
+ if !haveCredentialProvider(s.credentialsProvider) {
+ out.Result = &PresignedHTTPRequest{
+ URL: httpReq.URL.String(),
+ Method: httpReq.Method,
+ SignedHeader: http.Header{},
+ }
+
+ return out, metadata, nil
+ }
+
+ signingName := awsmiddleware.GetSigningName(ctx)
+ signingRegion := awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("computed payload hash missing from context"),
+ }
+ }
+
+ credentials, err := s.credentialsProvider.Retrieve(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+ }
+ }
+
+ u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+ httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(),
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ })
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to sign http request, %w", err),
+ }
+ }
+
+ out.Result = &PresignedHTTPRequest{
+ URL: u,
+ Method: httpReq.Method,
+ SignedHeader: h,
+ }
+
+ return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 000000000..61c669843
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,539 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for request that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional logic.
+// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
+// additional escaping you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "///"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and using the returned value.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of these escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK that explicitly escaping the request prior to being signed is preferable,
+// and will help prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/logging"
+)
+
+const (
+ signingAlgorithm = "AWS4-HMAC-SHA256"
+ authorizationHeader = "Authorization"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
+type HTTPSigner interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+ DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping then use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // The logger to send log messages to.
+ Logger logging.Logger
+
+ // Enable logging of signed requests.
+ // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+ // presigned URL.
+ LogSigning bool
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ options SignerOptions
+ keyDerivator keyDerivator
+}
+
+// NewSigner returns a new SigV4 Signer
+func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
+ options := SignerOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
+}
+
+type httpSigner struct {
+ Request *http.Request
+ ServiceName string
+ Region string
+ Time v4Internal.SigningTime
+ Credentials aws.Credentials
+ KeyDerivator keyDerivator
+ IsPreSign bool
+
+ PayloadHash string
+
+ DisableHeaderHoisting bool
+ DisableURIPathEscaping bool
+}
+
+func (s *httpSigner) Build() (signedRequest, error) {
+ req := s.Request
+
+ query := req.URL.Query()
+ headers := req.Header
+
+ s.setRequiredSigningFields(headers, query)
+
+ // Sort Each Query Key's Values
+ for key := range query {
+ sort.Strings(query[key])
+ }
+
+ v4Internal.SanitizeHostForHeader(req)
+
+ credentialScope := s.buildCredentialScope()
+ credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzCredentialKey, credentialStr)
+ }
+
+ unsignedHeaders := headers
+ if s.IsPreSign && !s.DisableHeaderHoisting {
+ var urlValues url.Values
+ urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
+ for k := range urlValues {
+ query[k] = urlValues[k]
+ }
+ }
+
+ host := req.URL.Host
+ if len(req.Host) > 0 {
+ host = req.Host
+ }
+
+ signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
+
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
+ }
+
+ var rawQuery strings.Builder
+ rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))
+
+ canonicalURI := v4Internal.GetURIPath(req.URL)
+ if !s.DisableURIPathEscaping {
+ canonicalURI = httpbinding.EscapePath(canonicalURI, false)
+ }
+
+ canonicalString := s.buildCanonicalString(
+ req.Method,
+ canonicalURI,
+ rawQuery.String(),
+ signedHeadersStr,
+ canonicalHeaderStr,
+ )
+
+ strToSign := s.buildStringToSign(credentialScope, canonicalString)
+ signingSignature, err := s.buildSignature(strToSign)
+ if err != nil {
+ return signedRequest{}, err
+ }
+
+ if s.IsPreSign {
+ rawQuery.WriteString("&X-Amz-Signature=")
+ rawQuery.WriteString(signingSignature)
+ } else {
+ headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
+ }
+
+ req.URL.RawQuery = rawQuery.String()
+
+ return signedRequest{
+ Request: req,
+ SignedHeaders: signedHeaders,
+ CanonicalString: canonicalString,
+ StringToSign: strToSign,
+ PreSigned: s.IsPreSign,
+ }, nil
+}
+
+func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
+ const credential = "Credential="
+ const signedHeaders = "SignedHeaders="
+ const signature = "Signature="
+ const commaSpace = ", "
+
+ var parts strings.Builder
+ parts.Grow(len(signingAlgorithm) + 1 +
+ len(credential) + len(credentialStr) + 2 +
+ len(signedHeaders) + len(signedHeadersStr) + 2 +
+ len(signature) + len(signingSignature),
+ )
+ parts.WriteString(signingAlgorithm)
+ parts.WriteRune(' ')
+ parts.WriteString(credential)
+ parts.WriteString(credentialStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signedHeaders)
+ parts.WriteString(signedHeadersStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signature)
+ parts.WriteString(signingSignature)
+ return parts.String()
+}
+
+// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided. Even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The passed in request will be modified in place.
+func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r,
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, false)
+
+ return nil
+}
+
+// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns the signed URL and the map of HTTP headers that were included in the
+// signature or an error if signing the request failed. For presigned requests
+// these headers and their values must be included on the HTTP request when it
+// is made. This is helpful to know what header values need to be shared with
+// the party the presigned request will be distributed to.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided. Even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// PresignHTTP differs from SignHTTP in that it will sign the request using
+// query string instead of header values. This allows you to share the
+// Presigned Request's URL with third parties, or distribute it throughout your
+// system with minimal dependencies.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notable used by
+// Amazon S3 APIs.
+//
+// expires := 20 * time.Minute
+// query := req.URL.Query()
+// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)
+// req.URL.RawQuery = query.Encode()
+//
+// This method does not modify the provided request.
+func (s *Signer) PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+) (signedURI string, signedHeaders http.Header, err error) {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r.Clone(r.Context()),
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ IsPreSign: true,
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return "", nil, err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, true)
+
+ signedHeaders = make(http.Header)
+
+ // For the signed headers we canonicalize the header keys in the returned map.
+ // This avoids situations where the standard library can double headers like the host header. For example the standard
+ // library will set the Host header, even if it is present in lower-case form.
+ for k, v := range signedRequest.SignedHeaders {
+ key := textproto.CanonicalMIMEHeaderKey(k)
+ signedHeaders[key] = append(signedHeaders[key], v...)
+ }
+
+ return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+ return strings.Join([]string{
+ s.Time.ShortTimeFormat(),
+ s.Region,
+ s.ServiceName,
+ "aws4_request",
+ }, "/")
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+ signed = make(http.Header)
+
+ var headers []string
+ const hostHeader = "host"
+ headers = append(headers, hostHeader)
+ signed[hostHeader] = append(signed[hostHeader], host)
+
+ if length > 0 {
+ const contentLengthHeader = "content-length"
+ headers = append(headers, contentLengthHeader)
+ signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+ }
+
+ for k, v := range header {
+ if !rule.IsValid(k) {
+ continue // ignored header
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := signed[lowerCaseKey]; ok {
+ // include additional values
+ signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ signed[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ signedHeaders = strings.Join(headers, ";")
+
+ var canonicalHeaders strings.Builder
+ n := len(headers)
+ const colon = ':'
+ for i := 0; i < n; i++ {
+ if headers[i] == hostHeader {
+ canonicalHeaders.WriteString(hostHeader)
+ canonicalHeaders.WriteRune(colon)
+ canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
+ } else {
+ canonicalHeaders.WriteString(headers[i])
+ canonicalHeaders.WriteRune(colon)
+ canonicalHeaders.WriteString(strings.Join(signed[headers[i]], ","))
+ }
+ canonicalHeaders.WriteRune('\n')
+ }
+ canonicalHeadersStr = canonicalHeaders.String()
+
+ return signed, signedHeaders, canonicalHeadersStr
+}
+
+func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
+ return strings.Join([]string{
+ method,
+ uri,
+ query,
+ canonicalHeaders,
+ signedHeaders,
+ s.PayloadHash,
+ }, "\n")
+}
+
+func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
+ return strings.Join([]string{
+ signingAlgorithm,
+ s.Time.TimeFormat(),
+ credentialScope,
+ hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
+ }, "\n")
+}
+
+func makeHash(hash hash.Hash, b []byte) []byte {
+ hash.Reset()
+ hash.Write(b)
+ return hash.Sum(nil)
+}
+
+func (s *httpSigner) buildSignature(strToSign string) (string, error) {
+ key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
+ return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
+}
+
+func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
+ amzDate := s.Time.TimeFormat()
+
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
+ if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ query.Set(v4Internal.AmzDateKey, amzDate)
+ return
+ }
+
+ headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
+
+ if len(s.Credentials.SessionToken) > 0 {
+ headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
+ }
+}
+
+func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
+ if !options.LogSigning {
+ return
+ }
+ signedURLMsg := ""
+ if isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
+ }
+ logger := logging.WithContext(ctx, options.Logger)
+ logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg)
+}
+
+type signedRequest struct {
+ Request *http.Request
+ SignedHeaders http.Header
+ CanonicalString string
+ StringToSign string
+ PreSigned bool
+}
+
+const logSignInfoMsg = `Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
new file mode 100644
index 000000000..28a193b8a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
@@ -0,0 +1,280 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+ "github.com/aws/smithy-go/ptr"
+ "time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+ return ptr.Bool(v)
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+ return ptr.BoolSlice(vs)
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+ return ptr.BoolMap(vs)
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+ return ptr.Byte(v)
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+ return ptr.ByteSlice(vs)
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+ return ptr.ByteMap(vs)
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+ return ptr.String(v)
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+ return ptr.StringSlice(vs)
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+ return ptr.StringMap(vs)
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+ return ptr.Int(v)
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+ return ptr.IntSlice(vs)
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+ return ptr.IntMap(vs)
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+ return ptr.Int8(v)
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+ return ptr.Int8Slice(vs)
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+ return ptr.Int8Map(vs)
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+ return ptr.Int16(v)
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+ return ptr.Int16Slice(vs)
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+ return ptr.Int16Map(vs)
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+ return ptr.Int32(v)
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+ return ptr.Int32Slice(vs)
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+ return ptr.Int32Map(vs)
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+ return ptr.Int64(v)
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+ return ptr.Int64Slice(vs)
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+ return ptr.Int64Map(vs)
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+ return ptr.Uint(v)
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+ return ptr.UintSlice(vs)
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+ return ptr.UintMap(vs)
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return ptr.Uint8(v)
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+ return ptr.Uint8Slice(vs)
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+ return ptr.Uint8Map(vs)
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return ptr.Uint16(v)
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+ return ptr.Uint16Slice(vs)
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+ return ptr.Uint16Map(vs)
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return ptr.Uint32(v)
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+ return ptr.Uint32Slice(vs)
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+ return ptr.Uint32Map(vs)
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return ptr.Uint64(v)
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+ return ptr.Uint64Slice(vs)
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+ return ptr.Uint64Map(vs)
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+ return ptr.Float32(v)
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+ return ptr.Float32Slice(vs)
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+ return ptr.Float32Map(vs)
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+ return ptr.Float64(v)
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+ return ptr.Float64Slice(vs)
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+ return ptr.Float64Map(vs)
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+ return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ return ptr.TimeMap(vs)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
new file mode 100644
index 000000000..b207a5655
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
@@ -0,0 +1,301 @@
+package http
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+ "reflect"
+ "sync"
+ "time"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+ // Default connection pool options
+ DefaultHTTPTransportMaxIdleConns = 100
+ DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+ // Default connection timeouts
+ DefaultHTTPTransportIdleConnTimeout = 90 * time.Second
+ DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+ DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+ // Default to TLS 1.2 for all HTTPS requests.
+ DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+ DefaultDialConnectTimeout = 30 * time.Second
+ DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides a HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+ transport *http.Transport
+ dialer *net.Dialer
+
+ initOnce sync.Once
+
+ clientTimeout time.Duration
+ client *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+ return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke a HTTP request,
+// and receive the response. Uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed, the HTTP response received
+// will be returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+ b.initOnce.Do(b.build)
+
+ return b.client.Do(req)
+}
+
+func (b *BuildableClient) build() {
+ b.client = wrapWithLimitedRedirect(&http.Client{
+ Timeout: b.clientTimeout,
+ Transport: b.GetTransport(),
+ })
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+ cpy := NewBuildableClient()
+ cpy.transport = b.GetTransport()
+ cpy.dialer = b.GetDialer()
+ cpy.clientTimeout = b.clientTimeout
+
+ return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If a non (*http.Transport) was set as the round tripper, the round tripper
+// will be replaced with a default Transport value before invoking the option
+// functions.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+ cpy := b.clone()
+
+ tr := cpy.GetTransport()
+ for _, opt := range opts {
+ opt(tr)
+ }
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+ cpy := b.clone()
+
+ dialer := cpy.GetDialer()
+ for _, opt := range opts {
+ opt(dialer)
+ }
+ cpy.dialer = dialer
+
+ tr := cpy.GetTransport()
+ tr.DialContext = cpy.dialer.DialContext
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithTimeout Sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+ cpy := b.clone()
+ cpy.clientTimeout = timeout
+ return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+ var tr *http.Transport
+ if b.transport != nil {
+ tr = b.transport.Clone()
+ } else {
+ tr = defaultHTTPTransport()
+ }
+
+ return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+ var dialer *net.Dialer
+ if b.dialer != nil {
+ dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+ } else {
+ dialer = defaultDialer()
+ }
+
+ return dialer
+}
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
+func (b *BuildableClient) GetTimeout() time.Duration {
+ return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+ return &net.Dialer{
+ Timeout: DefaultDialConnectTimeout,
+ KeepAlive: DefaultDialKeepAliveTimeout,
+ DualStack: true,
+ }
+}
+
+func defaultHTTPTransport() *http.Transport {
+ dialer := defaultDialer()
+
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout,
+ MaxIdleConns: DefaultHTTPTransportMaxIdleConns,
+ MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost,
+ IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout,
+ ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+ ForceAttemptHTTP2: true,
+ TLSClientConfig: &tls.Config{
+ MinVersion: DefaultHTTPTransportTLSMinVersion,
+ },
+ }
+
+ return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+ srcVal := reflect.ValueOf(src)
+ srcValType := srcVal.Type()
+
+ var returnAsPtr bool
+ if srcValType.Kind() == reflect.Ptr {
+ srcVal = srcVal.Elem()
+ srcValType = srcValType.Elem()
+ returnAsPtr = true
+ }
+ dstVal := reflect.New(srcValType).Elem()
+
+ for i := 0; i < srcValType.NumField(); i++ {
+ ft := srcValType.Field(i)
+ if len(ft.PkgPath) != 0 {
+ // unexported fields have a PkgPath
+ continue
+ }
+
+ dstVal.Field(i).Set(srcVal.Field(i))
+ }
+
+ if returnAsPtr {
+ dstVal = dstVal.Addr()
+ }
+
+ return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined will use a new SDK default
+// http.Transport configuration.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+ tr := c.Transport
+ if tr == nil {
+ tr = defaultHTTPTransport()
+ }
+
+ cc := *c
+ cc.CheckRedirect = limitedRedirect
+ cc.Transport = suppressBadHTTPRedirectTransport{
+ tr: tr,
+ }
+
+ return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected to location. Whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+ // Request.Response, in CheckRedirect is the response that is triggering
+ // the redirect.
+ resp := r.Response
+ if r.URL.String() == badHTTPRedirectLocation {
+ resp.Header.Del(badHTTPRedirectLocation)
+ return http.ErrUseLastResponse
+ }
+
+ switch resp.StatusCode {
+ case 307, 308:
+ // Only allow 307 and 308 redirects as they preserve the method.
+ return nil
+ }
+
+ return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent HTTP client
+// receiving 301 and 302 HTTP responses redirects without the required location
+// header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses having a URL of badHTTPRedirectLocation, and
+// suppress the redirect.
+type suppressBadHTTPRedirectTransport struct {
+ tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ resp, err := t.tr.RoundTrip(r)
+ if err != nil {
+ return resp, err
+ }
+
+ // S3 is the only known service to return 301 without location header.
+ // The Go standard library HTTP client will return an opaque error if it
+ // tries to follow a 301/302 response missing the location header.
+ switch resp.StatusCode {
+ case 301, 302:
+ if v := resp.Header.Get("Location"); len(v) == 0 {
+ resp.Header.Set("Location", badHTTPRedirectLocation)
+ }
+ }
+
+ return resp, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
new file mode 100644
index 000000000..556f54a7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
@@ -0,0 +1,42 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes
+// content type header if content-length header is unset or
+// is set to zero,
+type removeContentTypeHeader struct {
+}
+
+// ID the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+ return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the content-type header from the request when the content length is zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in)
+ }
+
+ // remove contentTypeHeader when content-length is zero
+ if req.ContentLength == 0 {
+ req.Header.Del("content-type")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes content-type header if
+// content length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+ return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
new file mode 100644
index 000000000..44651c990
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
@@ -0,0 +1,33 @@
+package http
+
+import (
+ "errors"
+ "fmt"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+ *smithyhttp.ResponseError
+
+ // RequestID associated with response error
+ RequestID string
+}
+
+// ServiceRequestID returns the request id associated with Response Error
+func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "https response error StatusCode: %d, RequestID: %s, %v",
+ e.Response.StatusCode, e.RequestID, e.Err)
+}
+
+// As populates target and returns true if the type of target is a error type that
+// the ResponseError embeds, (e.g.AWS HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+ return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
new file mode 100644
index 000000000..8fd14cecd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
@@ -0,0 +1,54 @@
+package http
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+ // add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+type responseErrorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *responseErrorWrapper) ID() string {
+ return "ResponseErrorWrapper"
+}
+
+func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err == nil {
+ // Nothing to do when there is no error.
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // look for request id in metadata
+ reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+
+ // Wrap the returned smithy error with the request id retrieved from the metadata
+ err = &ResponseError{
+ ResponseError: &smithyhttp.ResponseError{
+ Response: resp,
+ Err: err,
+ },
+ RequestID: reqID,
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
new file mode 100644
index 000000000..993929bd9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
@@ -0,0 +1,104 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// ResponseTimeoutError is an error when the reads from the response are
+// delayed longer than the timeout the read was configured for.
+type ResponseTimeoutError struct {
+ TimeoutDur time.Duration
+}
+
+// Timeout returns that the error was caused by a timeout, and can be
+// retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+ return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return a ErrReadTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+ return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+ duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+ return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ response.Body = &timeoutReadCloser{
+ reader: response.Body,
+ duration: m.duration,
+ }
+ out.RawResponse = response
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 000000000..cc3ae8114
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+ switch t {
+ case UnknownTernary:
+ return "unknown"
+ case FalseTernary:
+ return "false"
+ case TrueTernary:
+ return "true"
+ default:
+ return fmt.Sprintf("unknown value, %d", int(t))
+ }
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+ return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+ UnknownTernary Ternary = iota
+ FalseTernary
+ TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
+func BoolTernary(v bool) Ternary {
+ if v {
+ return TrueTernary
+ }
+ return FalseTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
new file mode 100644
index 000000000..b7ba41a53
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go-v2"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.2.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
new file mode 100644
index 000000000..fa2df2c82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -0,0 +1,190 @@
+package config
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// defaultLoaders are a slice of functions that will read external configuration
+// sources for configuration values. These values are read by the AWSConfigResolvers
+// using interfaces to extract specific information from the external configuration.
+var defaultLoaders = []loader{
+ loadEnvConfig,
+ loadSharedConfigIgnoreNotExist,
+}
+
+// defaultAWSConfigResolvers are a slice of functions that will resolve external
+// configuration values into AWS configuration values.
+//
+// This will setup the AWS configuration's Region,
+var defaultAWSConfigResolvers = []awsConfigResolver{
+ // Resolves the default configuration the SDK's aws.Config will be
+ // initialized with.
+ resolveDefaultAWSConfig,
+
+ // Sets the logger to be used. Could be user provided logger, and client
+ // logging mode.
+ resolveLogger,
+ resolveClientLogMode,
+
+ // Sets the HTTP client and configuration to use for making requests using
+ // the HTTP transport.
+ resolveHTTPClient,
+ resolveCustomCABundle,
+
+ // Sets the endpoint resolving behavior the API Clients will use for making
+ // requests to. Clients default to their own clients this allows overrides
+ // to be specified.
+ resolveEndpointResolver,
+
+ // Sets the retry behavior API clients will use within their retry attempt
+ // middleware. Defaults to unset, allowing API clients to define their own
+ // retry behavior.
+ resolveRetryer,
+
+ // Sets the region the API Clients should use for making requests to.
+ resolveRegion,
+ resolveEC2IMDSRegion,
+ resolveDefaultRegion,
+
+ // Sets the additional set of middleware stack mutators that will custom
+ // API client request pipeline middleware.
+ resolveAPIOptions,
+
+ // Sets the resolved credentials the API clients will use for
+ // authentication. Provides the SDK's default credential chain.
+ //
+ // Should probably be the last step in the resolve chain to ensure that all
+ // other configurations are resolved first in case downstream credentials
+ // implementations depend on or can be configured with earlier resolved
+ // configuration options.
+ resolveCredentials,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract
+//
+// Generally, the Config type will use type assertion against the Provider interfaces
+// to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration
+// or the configuration data is malformed, or required components missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, the
+// data is malformed, or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+ for _, fn := range loaders {
+ cfg, err := fn(ctx, cs)
+ if err != nil {
+ return nil, err
+ }
+
+ cs = append(cs, cfg)
+ }
+
+ return cs, nil
+}
+
+// ResolveAWSConfig returns an AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+ var cfg aws.Config
+
+ for _, fn := range resolvers {
+ if err := fn(ctx, &cfg, cs); err != nil {
+ // TODO provide better error?
+ return aws.Config{}, err
+ }
+ }
+
+ var sources []interface{}
+ for _, s := range cs {
+ sources = append(sources, s)
+ }
+ cfg.ConfigSources = sources
+
+ return cfg, nil
+}
+
+// ResolveConfig calls the provide function passing slice of configuration sources.
+// This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+ var cfgs []interface{}
+ for i := range cs {
+ cfgs = append(cfgs, cs[i])
+ }
+ return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+// cfg, err := config.LoadDefaultConfig( context.TODO(),
+// WithSharedConfigProfile("test-profile"),
+// )
+// if err != nil {
+// panic(fmt.Sprintf("failed loading config, %v", err))
+// }
+//
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+ var options LoadOptions
+ for _, optFn := range optFns {
+ optFn(&options)
+ }
+
+ // assign Load Options to configs
+ var cfgCpy = configs{options}
+
+ cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders)
+ if err != nil {
+ return aws.Config{}, err
+ }
+
+ cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+ if err != nil {
+ return aws.Config{}, err
+ }
+
+ return cfg, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
new file mode 100644
index 000000000..31648ffb5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
@@ -0,0 +1,20 @@
+// Package config provides utilities for loading configuration from multiple
+// sources that can be used to configure the SDK's API clients, and utilities.
+//
+// The config package will load configuration from environment variables, AWS
+// shared configuration file (~/.aws/config), and AWS shared credentials file
+// (~/.aws/credentials).
+//
+// Use the LoadDefaultConfig to load configuration from all the SDK's supported
+// sources, and resolve credentials using the SDK's default credential chain.
+//
+// LoadDefaultConfig allows for a variadic list of additional Config sources that can
+// provide one or more configuration values which can be used to programmatically control the resolution
+// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK.
+// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will
+// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
+// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
+//
+// A number of helpers (prefixed by ``With``) are provided in this package that implement their respective provider
+// interface. These helpers should be used for overriding configuration programmatically at runtime.
+package config
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
new file mode 100644
index 000000000..85040b45d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -0,0 +1,341 @@
+package config
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// CredentialsSourceName provides a name of the provider when config is
+// loaded from environment.
+const CredentialsSourceName = "EnvConfigCredentials"
+
+// Environment variables that will be read for configuration values.
+const (
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+ awsAccessKeyEnvVar = "AWS_ACCESS_KEY"
+
+ awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+ awsSecretKeyEnvVar = "AWS_SECRET_KEY"
+
+ awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+ awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+ awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+
+ awsRegionEnvVar = "AWS_REGION"
+ awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+
+ awsProfileEnvVar = "AWS_PROFILE"
+ awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+
+ awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+
+ awsConfigFileEnvVar = "AWS_CONFIG_FILE"
+
+ awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+
+ awsWebIdentityTokenFilePathEnvKey = "AWS_WEB_IDENTITY_TOKEN_FILE"
+
+ awsRoleARNEnvKey = "AWS_ROLE_ARN"
+ awsRoleSessionNameEnvKey = "AWS_ROLE_SESSION_NAME"
+
+ awsEnableEndpointDiscoveryEnvKey = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+
+ awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
+)
+
+var (
+ credAccessEnvKeys = []string{
+ awsAccessKeyIDEnvVar,
+ awsAccessKeyEnvVar,
+ }
+ credSecretEnvKeys = []string{
+ awsSecretAccessKeyEnvVar,
+ awsSecretKeyEnvVar,
+ }
+ regionEnvKeys = []string{
+ awsRegionEnvVar,
+ awsDefaultRegionEnvVar,
+ }
+ profileEnvKeys = []string{
+ awsProfileEnvVar,
+ awsDefaultProfileEnvVar,
+ }
+)
+
+// EnvConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type EnvConfig struct {
+ // Environment configuration values. If set both Access Key ID and Secret Access
+	// Key must be provided. Session Token can optionally also be provided, but is
+ // not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Credentials aws.Credentials
+
+ // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+ // using the endpointcreds.Provider
+ ContainerCredentialsEndpoint string
+
+ // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+ // credentials from the container endpoint.
+ ContainerCredentialsRelativePath string
+
+ // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+ // header when attempting to retrieve credentials from the container credentials endpoint.
+ ContainerAuthorizationToken string
+
+ // Region value will instruct the SDK where to make service API requests to. If is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-west-2
+ // AWS_DEFAULT_REGION=us-west-2
+ Region string
+
+ // Profile name the SDK should load use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ // AWS_DEFAULT_PROFILE=my_profile
+ SharedConfigProfile string
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the config. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+
+ // Specifies the WebIdentity token the SDK should use to assume a role
+ // with.
+ //
+ // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+ WebIdentityTokenFilePath string
+
+ // Specifies the IAM role arn to use when assuming an role.
+ //
+ // AWS_ROLE_ARN=role_arn
+ RoleARN string
+
+ // Specifies the IAM role session name to use when assuming a role.
+ //
+ // AWS_ROLE_SESSION_NAME=session_name
+ RoleSessionName string
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // AWS_S3_USE_ARN_REGION=true
+ S3UseARNRegion *bool
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables.
+// Returning a Config typed EnvConfig to satisfy the ConfigLoader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+ return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+ var cfg EnvConfig
+
+ creds := aws.Credentials{
+ Source: CredentialsSourceName,
+ }
+ setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+ setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+ if creds.HasKeys() {
+ creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+ cfg.Credentials = creds
+ }
+
+ cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
+ cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
+ cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+
+ setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+ setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+ cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
+ cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+
+ cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+
+ cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvKey)
+
+ cfg.RoleARN = os.Getenv(awsRoleARNEnvKey)
+ cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvKey)
+
+ if err := setBoolPtrFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvKey}); err != nil {
+ return cfg, err
+ }
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+ return cfg, err
+ }
+
+ return cfg, nil
+}
+
+// GetRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+ if len(c.Region) == 0 {
+ return "", false, nil
+ }
+ return c.Region, true, nil
+}
+
+// GetSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+ if len(c.SharedConfigProfile) == 0 {
+ return "", false, nil
+ }
+
+ return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+ var files []string
+ if v := c.SharedConfigFile; len(v) > 0 {
+ files = append(files, v)
+ }
+
+ if len(files) == 0 {
+ return nil, false, nil
+ }
+ return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+ var files []string
+ if v := c.SharedCredentialsFile; len(v) > 0 {
+ files = append(files, v)
+ }
+ if len(files) == 0 {
+ return nil, false, nil
+ }
+ return files, true, nil
+}
+
+// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+ if len(c.CustomCABundle) == 0 {
+ return nil, false, nil
+ }
+
+ b, err := ioutil.ReadFile(c.CustomCABundle)
+ if err != nil {
+ return nil, false, err
+ }
+ return bytes.NewReader(b), true, nil
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+ if c.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+
+ return *c.S3UseARNRegion, true, nil
+}
+
+func setStringFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
+
+func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ if *dst == nil {
+ *dst = new(bool)
+ }
+
+ switch {
+ case strings.EqualFold(value, "false"):
+ **dst = false
+ case strings.EqualFold(value, "true"):
+ **dst = true
+ default:
+ return fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true or false",
+ k, value)
+ }
+ break
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
new file mode 100644
index 000000000..654a7a77f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
@@ -0,0 +1,4 @@
+package config
+
+//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go
+//go:generate gofmt -s -w ./
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/config/go.mod
new file mode 100644
index 000000000..2f04d1762
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go.mod
@@ -0,0 +1,25 @@
+module github.com/aws/aws-sdk-go-v2/config
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/aws-sdk-go-v2/credentials v1.1.2
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/sso v1.1.2
+ github.com/aws/aws-sdk-go-v2/service/sts v1.1.2
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace (
+ github.com/aws/aws-sdk-go-v2 => ../
+ github.com/aws/aws-sdk-go-v2/credentials => ../credentials/
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds => ../feature/ec2/imds/
+ github.com/aws/aws-sdk-go-v2/service/sts => ../service/sts/
+)
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => ../service/internal/presigned-url/
+
+replace github.com/aws/aws-sdk-go-v2/service/sso => ../service/sso/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/config/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
new file mode 100644
index 000000000..1b4828e6c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -0,0 +1,621 @@
+package config
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// LoadOptionsFunc is a type alias for LoadOptions functional option
+type LoadOptionsFunc func(*LoadOptions) error
+
+// LoadOptions are discrete set of options that are valid for loading the configuration
+type LoadOptions struct {
+
+ // Region is the region to send requests to.
+ Region string
+
+ // Credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // HTTPClient the SDK's API clients will use to invoke HTTP requests.
+ HTTPClient HTTPClient
+
+ // EndpointResolver that can be used to provide or override an endpoint for the given
+ // service and region Please see the `aws.EndpointResolver` documentation on usage.
+ EndpointResolver aws.EndpointResolver
+
+ // Retryer is a function that provides a Retryer implementation. A Retryer guides how HTTP requests should be
+ // retried in case of recoverable failures.
+ Retryer func() aws.Retryer
+
+ // APIOptions provides the set of middleware mutations modify how the API
+ // client requests will be handled. This is useful for adding additional
+ // tracing data to a request, or changing behavior of the SDK's client.
+ APIOptions []func(*middleware.Stack) error
+
+ // Logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // ClientLogMode is used to configure the events that will be sent to the configured logger.
+ // This can be used to configure the logging of signing, retries, request, and responses
+ // of the SDK clients.
+ //
+ // See the ClientLogMode type documentation for the complete set of logging modes and available
+ // configuration.
+ ClientLogMode *aws.ClientLogMode
+
+ // SharedConfigProfile is the profile to be used when loading the SharedConfig
+ SharedConfigProfile string
+
+ // SharedConfigFiles is the slice of custom shared config files to use when loading the SharedConfig.
+ // A non-default profile used within config file must have name defined with prefix 'profile '.
+ // eg [profile xyz] indicates a profile with name 'xyz'.
+ // To read more on the format of the config file, please refer the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+ //
+ // If duplicate profiles are provided within the same, or across multiple shared config files, the next parsed
+ // profile will override only the properties that conflict with the previously defined profile.
+ // Note that if duplicate profiles are provided within the SharedCredentialsFiles and SharedConfigFiles,
+ // the properties defined in shared credentials file take precedence.
+ SharedConfigFiles []string
+
+ // SharedCredentialsFile is the slice of custom shared credentials files to use when loading the SharedConfig.
+ // The profile name used within credentials file must not prefix 'profile '.
+ // eg [xyz] indicates a profile with name 'xyz'. Profile declared as [profile xyz] will be ignored.
+ // To read more on the format of the credentials file, please refer the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+ //
+ // If duplicate profiles are provided with a same, or across multiple shared credentials files, the next parsed
+ // profile will override only properties that conflict with the previously defined profile.
+ // Note that if duplicate profiles are provided within the SharedCredentialsFiles and SharedConfigFiles,
+ // the properties defined in shared credentials file take precedence.
+ SharedCredentialsFiles []string
+
+ // CustomCABundle is CA bundle PEM bytes reader
+ CustomCABundle io.Reader
+
+ // DefaultRegion is the fall back region, used if a region was not resolved from other sources
+ DefaultRegion string
+
+ // UseEC2IMDSRegion indicates if SDK should retrieve the region
+ // from the EC2 Metadata service
+ UseEC2IMDSRegion *UseEC2IMDSRegion
+
+ // ProcessCredentialOptions is a function for setting
+ // the processcreds.Options
+ ProcessCredentialOptions func(*processcreds.Options)
+
+ // EC2RoleCredentialOptions is a function for setting
+ // the ec2rolecreds.Options
+ EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+ // EndpointCredentialOptions is a function for setting
+ // the endpointcreds.Options
+ EndpointCredentialOptions func(*endpointcreds.Options)
+
+ // WebIdentityRoleCredentialOptions is a function for setting
+ // the stscreds.WebIdentityRoleOptions
+ WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+ // AssumeRoleCredentialOptions is a function for setting the
+ // stscreds.AssumeRoleOptions
+ AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+ // SSOProviderOptions is a function for setting
+ // the ssocreds.Options
+ SSOProviderOptions func(options *ssocreds.Options)
+
+ // LogConfigurationWarnings when set to true, enables logging
+ // configuration warnings
+ LogConfigurationWarnings *bool
+
+ // S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+ // the region, the client's requests are sent to.
+ S3UseARNRegion *bool
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+ if len(o.Region) == 0 {
+ return "", false, nil
+ }
+
+ return o.Region, true, nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string, will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Region = v
+ return nil
+ }
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+ if len(o.DefaultRegion) == 0 {
+ return "", false, nil
+ }
+
+ return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string, will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both WithRegion and
+// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call
+// when resolving region.
+func WithDefaultRegion(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.DefaultRegion = v
+ return nil
+ }
+}
+
+// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions
+func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+ if len(o.SharedConfigProfile) == 0 {
+ return "", false, nil
+ }
+
+ return o.SharedConfigProfile, true, nil
+}
+
+// WithSharedConfigProfile is a helper function to construct functional options
+// that sets SharedConfigProfile on config's LoadOptions. Setting the shared
+// config profile to an empty string, will result in the shared config profile
+// value being ignored.
+// If multiple WithSharedConfigProfile calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigProfile(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedConfigProfile = v
+ return nil
+ }
+}
+
+// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions
+func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) {
+ if o.SharedConfigFiles == nil {
+ return nil, false, nil
+ }
+
+ return o.SharedConfigFiles, true, nil
+}
+
+// WithSharedConfigFiles is a helper function to construct functional options
+// that sets slice of SharedConfigFiles on config's LoadOptions.
+// Setting the shared config files to an nil string slice, will result in the
+// shared config files value being ignored.
+// If multiple WithSharedConfigFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigFiles(v []string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedConfigFiles = v
+ return nil
+ }
+}
+
+// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions
+func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) {
+ if o.SharedCredentialsFiles == nil {
+ return nil, false, nil
+ }
+
+ return o.SharedCredentialsFiles, true, nil
+}
+
+// WithSharedCredentialsFiles is a helper function to construct functional options
+// that sets slice of SharedCredentialsFiles on config's LoadOptions.
+// Setting the shared credentials files to an nil string slice, will result in the
+// shared credentials files value being ignored.
+// If multiple WithSharedCredentialsFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedCredentialsFiles(v []string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedCredentialsFiles = v
+ return nil
+ }
+}
+
+// getCustomCABundle returns CustomCABundle from LoadOptions
+func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) {
+ if o.CustomCABundle == nil {
+ return nil, false, nil
+ }
+
+ return o.CustomCABundle, true, nil
+}
+
+// WithCustomCABundle is a helper function to construct functional options
+// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle
+// to nil will result in custom CA Bundle value being ignored.
+// If multiple WithCustomCABundle calls are made, the last call overrides the
+// previous call values.
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.CustomCABundle = v
+ return nil
+ }
+}
+
+// UseEC2IMDSRegion provides a regionProvider that retrieves the region
+// from the EC2 Metadata service.
+type UseEC2IMDSRegion struct {
+ // If unset will default to generic EC2 IMDS client.
+ Client *imds.Client
+}
+
+// getRegion attempts to retrieve the region from EC2 Metadata service.
+func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ client := p.Client
+ if client == nil {
+ client = imds.New(imds.Options{})
+ }
+
+ result, err := client.GetRegion(ctx, nil)
+ if err != nil {
+ return "", false, err
+ }
+ if len(result.Region) != 0 {
+ return result.Region, true, nil
+ }
+ return "", false, nil
+}
+
+// getEC2IMDSRegion returns the value of EC2 IMDS region.
+func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) {
+ if o.UseEC2IMDSRegion == nil {
+ return "", false, nil
+ }
+
+ return o.UseEC2IMDSRegion.getRegion(ctx)
+}
+
+// WithEC2IMDSRegion is a helper function to construct functional options
+// that enables resolving EC2IMDS region. The function takes
+// in a UseEC2IMDSRegion functional option, and can be used to set the
+// EC2IMDS client which will be used to resolve EC2IMDSRegion.
+// If no functional option is provided, an EC2IMDS client is built and used
+// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last
+// call overrides the previous call values. Note that the WithRegion calls takes
+// precedence over WithEC2IMDSRegion when resolving region.
+func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
+
+ for _, fn := range fnOpts {
+ fn(o.UseEC2IMDSRegion)
+ }
+ return nil
+ }
+}
+
+// getCredentialsProvider returns the credentials value
+func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
+ if o.Credentials == nil {
+ return nil, false, nil
+ }
+
+ return o.Credentials, true, nil
+}
+
+// WithCredentialsProvider is a helper function to construct functional options
+// that sets Credential provider value on config's LoadOptions. If credentials
+// provider is set to nil, the credentials provider value will be ignored.
+// If multiple WithCredentialsProvider calls are made, the last call overrides
+// the previous call values.
+func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Credentials = v
+ return nil
+ }
+}
+
+// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
+func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
+ if o.ProcessCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.ProcessCredentialOptions, true, nil
+}
+
+// WithProcessCredentialOptions is a helper function to construct functional options
+// that sets a function to use processcreds.Options on config's LoadOptions.
+// If process credential options is set to nil, the process credential value will
+// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ProcessCredentialOptions = v
+ return nil
+ }
+}
+
+// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options
+func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) {
+ if o.EC2RoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EC2RoleCredentialOptions, true, nil
+}
+
+// WithEC2RoleCredentialOptions is a helper function to construct functional options
+// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If
+// EC2 role credential options is set to nil, the EC2 role credential options value
+// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2RoleCredentialOptions = v
+ return nil
+ }
+}
+
+// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options
+func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) {
+ if o.EndpointCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointCredentialOptions, true, nil
+}
+
+// WithEndpointCredentialOptions is a helper function to construct functional options
+// that sets a function to use endpointcreds.Options on config's LoadOptions. If
+// endpoint credential options is set to nil, the endpoint credential options
+// value will be ignored. If multiple WithEndpointCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointCredentialOptions = v
+ return nil
+ }
+}
+
+// getWebIdentityRoleCredentialOptions returns the wrapped function
+func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) {
+ if o.WebIdentityRoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.WebIdentityRoleCredentialOptions, true, nil
+}
+
+// WithWebIdentityRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.WebIdentityRoleOptions
+// on config's LoadOptions. If web identity role credentials options is set to nil,
+// the web identity role credentials value will be ignored. If multiple
+// WithWebIdentityRoleCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.WebIdentityRoleCredentialOptions = v
+ return nil
+ }
+}
+
+// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) {
+ if o.AssumeRoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.AssumeRoleCredentialOptions, true, nil
+}
+
+// WithAssumeRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.AssumeRoleOptions
+// on config's LoadOptions. If assume role credentials options is set to nil,
+// the assume role credentials value will be ignored. If multiple
+// WithAssumeRoleCredentialOptions calls are made, the last call overrides
+// the previous call values.
+func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AssumeRoleCredentialOptions = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) {
+ if o.HTTPClient == nil {
+ return nil, false, nil
+ }
+
+ return o.HTTPClient, true, nil
+}
+
+// WithHTTPClient is a helper function to construct functional options
+// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil,
+// the HTTPClient value will be ignored.
+// If multiple WithHTTPClient calls are made, the last call overrides
+// the previous call values.
+func WithHTTPClient(v HTTPClient) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.HTTPClient = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) {
+ if o.APIOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.APIOptions, true, nil
+}
+
+// WithAPIOptions is a helper function to construct functional options
+// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the
+// APIOptions value is ignored. If multiple WithAPIOptions calls are
+// made, the last call overrides the previous call values.
+func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if v == nil {
+ return nil
+ }
+
+ o.APIOptions = append(o.APIOptions, v...)
+ return nil
+ }
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+ if o.Retryer == nil {
+ return nil, false, nil
+ }
+
+ return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Retryer = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+ if o.EndpointResolver == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets endpoint resolver on LoadOptions. The EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointResolver = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
+ if o.Logger == nil {
+ return nil, false, nil
+ }
+
+ return o.Logger, true, nil
+}
+
+// WithLogger is a helper function to construct functional options
+// that sets Logger on LoadOptions. If Logger is set to nil, the
+// Logger value will be ignored. If multiple WithLogger calls are made,
+// the last call overrides the previous call values.
+func WithLogger(v logging.Logger) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Logger = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) {
+ if o.ClientLogMode == nil {
+ return 0, false, nil
+ }
+
+ return *o.ClientLogMode, true, nil
+}
+
+// WithClientLogMode is a helper function to construct functional options
+// that sets client log mode on LoadOptions. If client log mode is set to nil,
+// the client log mode value will be ignored. If multiple WithClientLogMode calls are made,
+// the last call overrides the previous call values.
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ClientLogMode = &v
+ return nil
+ }
+}
+
+func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) {
+ if o.LogConfigurationWarnings == nil {
+ return false, false, nil
+ }
+ return *o.LogConfigurationWarnings, true, nil
+}
+
+// WithLogConfigurationWarnings is a helper function to construct
+// functional options that can be used to set LogConfigurationWarnings
+// on LoadOptions.
+//
+// If multiple WithLogConfigurationWarnings calls are made, the last call
+// overrides the previous call values.
+func WithLogConfigurationWarnings(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.LogConfigurationWarnings = &v
+ return nil
+ }
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) {
+ if o.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+ return *o.S3UseARNRegion, true, nil
+}
+
+// WithS3UseARNRegion is a helper function to construct functional options
+// that can be used to set S3UseARNRegion on LoadOptions.
+// If multiple WithS3UseARNRegion calls are made, the last call overrides
+// the previous call values.
+func WithS3UseARNRegion(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3UseARNRegion = &v
+ return nil
+ }
+}
+
+// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+ if o.SSOProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options is set to nil,
+// the sso provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOProviderOptions = v
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
new file mode 100644
index 000000000..b629137c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
@@ -0,0 +1,51 @@
+package config
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ if len(addrs) == 0 {
+ return false, fmt.Errorf("no addrs found for host, %s", host)
+ }
+
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func validateLocalURL(v string) error {
+ u, err := url.Parse(v)
+ if err != nil {
+ return err
+ }
+
+ host := u.Hostname()
+ if len(host) == 0 {
+ return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+ } else if isLoopback, err := isLoopbackHost(host); err != nil {
+ return fmt.Errorf("failed to resolve host %q, %v", host, err)
+ } else if !isLoopback {
+ return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
new file mode 100644
index 000000000..a4308368d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -0,0 +1,427 @@
+package config
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// sharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type sharedConfigProfileProvider interface {
+ getSharedConfigProfile(ctx context.Context) (string, bool, error)
+}
+
+// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedConfigProfileProvider); ok {
+ value, found, err = p.getSharedConfigProfile(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// sharedConfigFilesProvider provides access to the shared config filesnames
+// external configuration value.
+type sharedConfigFilesProvider interface {
+ getSharedConfigFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedConfigFilesProvider); ok {
+ value, found, err = p.getSharedConfigFiles(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// sharedCredentialsFilesProvider provides access to the shared credentials filesnames
+// external configuration value.
+type sharedCredentialsFilesProvider interface {
+ getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedCredentialsFilesProvider); ok {
+ value, found, err = p.getSharedCredentialsFiles(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// customCABundleProvider provides access to the custom CA bundle PEM bytes.
+type customCABundleProvider interface {
+ getCustomCABundle(ctx context.Context) (io.Reader, bool, error)
+}
+
+// getCustomCABundle searches the configs for a customCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(customCABundleProvider); ok {
+ value, found, err = p.getCustomCABundle(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// regionProvider provides access to the region external configuration value.
+type regionProvider interface {
+ getRegion(ctx context.Context) (string, bool, error)
+}
+
+// getRegion searches the configs for a regionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(regionProvider); ok {
+ value, found, err = p.getRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ec2IMDSRegionProvider provides access to the ec2 imds region
+// configuration value
+type ec2IMDSRegionProvider interface {
+ getEC2IMDSRegion(ctx context.Context) (string, bool, error)
+}
+
+// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and
+// returns the value if found. Returns an error if a provider fails before
+// a value is found.
+func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(ec2IMDSRegionProvider); ok {
+ region, found, err = provider.getEC2IMDSRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// credentialsProviderProvider provides access to the credentials external
+// configuration value.
+type credentialsProviderProvider interface {
+ getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error)
+}
+
+// getCredentialsProvider searches the configs for a credentialsProviderProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(credentialsProviderProvider); ok {
+ p, found, err = provider.getCredentialsProvider(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+ getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(processCredentialOptions); ok {
+ f, found, err = p.getProcessCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+ getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+ f, found, err = p.getEC2RoleCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+ getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(defaultRegionProvider); ok {
+ value, found, err = p.getDefaultRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
// the endpointcreds.ProviderOptions.
type endpointCredentialOptionsProvider interface {
	getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
}

// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found.
// Later sources are never consulted once a source reports found=true or errors.
func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
	for _, config := range configs {
		if p, ok := config.(endpointCredentialOptionsProvider); ok {
			f, found, err = p.getEndpointCredentialOptions(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
// the stscreds.WebIdentityRoleProvider.
type webIdentityRoleCredentialOptionsProvider interface {
	getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error)
}

// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found.
func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) {
	for _, config := range configs {
		if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok {
			f, found, err = p.getWebIdentityRoleCredentialOptions(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting
// the stscreds.AssumeRoleOptions.
type assumeRoleCredentialOptionsProvider interface {
	getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error)
}

// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found.
func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) {
	for _, config := range configs {
		if p, ok := config.(assumeRoleCredentialOptionsProvider); ok {
			f, found, err = p.getAssumeRoleCredentialOptions(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}
+
// HTTPClient is an HTTP client implementation. It mirrors the Do method of
// net/http.Client so any compatible client can be supplied.
type HTTPClient interface {
	Do(*http.Request) (*http.Response, error)
}

// httpClientProvider is an interface for retrieving HTTPClient
type httpClientProvider interface {
	getHTTPClient(ctx context.Context) (HTTPClient, bool, error)
}

// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs.
// The first source that reports found=true (or errors) wins.
func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) {
	for _, config := range configs {
		if p, ok := config.(httpClientProvider); ok {
			client, found, err = p.getHTTPClient(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// apiOptionsProvider is an interface for retrieving APIOptions
type apiOptionsProvider interface {
	getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error)
}

// getAPIOptions searches the slice of configs and returns the APIOptions set on configs
func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) {
	for _, config := range configs {
		if p, ok := config.(apiOptionsProvider); ok {
			// retrieve APIOptions from configs and set it on cfg
			apiOptions, found, err = p.getAPIOptions(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source
type endpointResolverProvider interface {
	getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error)
}

// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used
// to configure the aws.Config.EndpointResolver value.
func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(endpointResolverProvider); ok {
			f, found, err = p.getEndpointResolver(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}
+
// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
type loggerProvider interface {
	getLogger(ctx context.Context) (logging.Logger, bool, error)
}

// getLogger searches the provided config sources for a logging.Logger that can be used
// to configure the aws.Config.Logger value. The first match wins.
func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(loggerProvider); ok {
			l, found, err = p.getLogger(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
type clientLogModeProvider interface {
	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
}

// getClientLogMode returns the first aws.ClientLogMode found in the config sources.
func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(clientLogModeProvider); ok {
			m, found, err = p.getClientLogMode(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// retryProvider is an configuration provider for custom Retryer.
type retryProvider interface {
	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
}

// getRetryer returns the first Retryer constructor found in the config sources.
func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(retryProvider); ok {
			v, found, err = p.getRetryer(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

// logConfigurationWarningsProvider is an configuration provider for
// retrieving a boolean indicating whether configuration issues should
// be logged when loading from config sources
type logConfigurationWarningsProvider interface {
	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
}

// getLogConfigurationWarnings returns the first warn-on-config-issue flag found.
func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(logConfigurationWarningsProvider); ok {
			v, found, err = p.getLogConfigurationWarnings(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}
+
// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
// the ssocreds.Options.
type ssoCredentialOptionsProvider interface {
	getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error)
}

// getSSOProviderOptions returns the first ssocreds option function found in
// the config sources; the scan stops on the first found=true or error.
func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) {
	for _, c := range configs {
		if p, ok := c.(ssoCredentialOptionsProvider); ok {
			v, found, err = p.getSSOProviderOptions(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
new file mode 100644
index 000000000..550ca4e71
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -0,0 +1,227 @@
+package config
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go/logging"
+)
+
// resolveDefaultAWSConfig will write default configuration values into the cfg
// value. It will write the default values, overwriting any previous value.
//
// This should be used as the first resolver in the slice of resolvers when
// resolving external configuration.
func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
	// Replace the whole config: anonymous credentials and a stderr logger are
	// the baseline later resolvers build on.
	*cfg = aws.Config{
		Credentials: aws.AnonymousCredentials{},
		Logger:      logging.NewStandardLogger(os.Stderr),
	}
	return nil
}
+
+// resolveCustomCABundle extracts the first instance of a custom CA bundle filename
+// from the external configurations. It will update the HTTP Client's builder
+// to be configured with the custom CA bundle.
+//
+// Config provider used:
+// * customCABundleProvider
+func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+ pemCerts, found, err := getCustomCABundle(ctx, cfgs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ if cfg.HTTPClient == nil {
+ cfg.HTTPClient = awshttp.NewBuildableClient()
+ }
+
+ trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+
+ "has no WithTransportOptions, %T", cfg.HTTPClient)
+ }
+
+ var appendErr error
+ client := trOpts.WithTransportOptions(func(tr *http.Transport) {
+ if tr.TLSClientConfig == nil {
+ tr.TLSClientConfig = &tls.Config{}
+ }
+ if tr.TLSClientConfig.RootCAs == nil {
+ tr.TLSClientConfig.RootCAs = x509.NewCertPool()
+ }
+
+ b, err := ioutil.ReadAll(pemCerts)
+ if err != nil {
+ appendErr = fmt.Errorf("failed to read custom CA bundle PEM file")
+ }
+
+ if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) {
+ appendErr = fmt.Errorf("failed to load custom CA bundle PEM file")
+ }
+ })
+ if appendErr != nil {
+ return appendErr
+ }
+
+ cfg.HTTPClient = client
+ return err
+}
+
// resolveRegion extracts the first instance of a Region from the configs slice.
//
// Config providers used:
// * regionProvider
func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
	v, found, err := getRegion(ctx, configs)
	if err != nil {
		// TODO error handling, What is the best way to handle this?
		// capture previous errors continue. error out if all errors
		return err
	}
	if !found {
		// Leave cfg.Region untouched so a later resolver (default region,
		// IMDS region) can still supply one.
		return nil
	}

	cfg.Region = v
	return nil
}
+
+// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
+// region if region had not been resolved from other sources.
+func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+ if len(cfg.Region) > 0 {
+ return nil
+ }
+
+ v, found, err := getDefaultRegion(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Region = v
+
+ return nil
+}
+
// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance
// if one has not been resolved from other sources.
func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
	c, found, err := getHTTPClient(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.HTTPClient = c
	return nil
}

// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
// if one has not been resolved from other sources.
func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
	o, found, err := getAPIOptions(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	// Replaces (does not append to) any previously set APIOptions.
	cfg.APIOptions = o

	return nil
}

// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice
// and sets the functions result on the aws.Config.EndpointResolver
func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
	endpointResolver, found, err := getEndpointResolver(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.EndpointResolver = endpointResolver

	return nil
}
+
// resolveLogger sets aws.Config.Logger from the first logger found in the
// config sources, leaving the default (stderr) logger in place otherwise.
func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
	logger, found, err := getLogger(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.Logger = logger

	return nil
}

// resolveClientLogMode sets aws.Config.ClientLogMode from the first log mode
// found in the config sources.
func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
	mode, found, err := getClientLogMode(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.ClientLogMode = mode

	return nil
}

// resolveRetryer sets aws.Config.Retryer from the first Retryer constructor
// found in the config sources.
func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
	retryer, found, err := getRetryer(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.Retryer = retryer

	return nil
}

// resolveEC2IMDSRegion queries the EC2 instance metadata service for a region
// as a last resort, only when no region was resolved from other sources.
func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
	if len(cfg.Region) > 0 {
		return nil
	}

	region, found, err := getEC2IMDSRegion(ctx, configs)
	if err != nil {
		return err
	}
	if !found {
		return nil
	}

	cfg.Region = region

	return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
new file mode 100644
index 000000000..449fc393d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -0,0 +1,447 @@
+package config
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/aws-sdk-go-v2/service/sso"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
const (
	// valid credential source values for the shared config credential_source
	// key; anything else is rejected by resolveCredsFromSource.
	credSourceEc2Metadata  = "Ec2InstanceMetadata"
	credSourceEnvironment  = "Environment"
	credSourceECSContainer = "EcsContainer"
)

var (
	ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
)
+
+// resolveCredentials extracts a credential provider from slice of config sources.
+//
+// If an explict credential provider is not found the resolver will fallback to resolving
+// credentials by extracting a credential provider from EnvConfig and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+ found, err := resolveCredentialProvider(ctx, cfg, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ return nil
+ }
+
+ err = resolveCredentialChain(ctx, cfg, configs)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
// resolveCredentialProvider extracts the first instance of Credentials from the
// config slices.
//
// The resolved CredentialProvider will be wrapped in a cache to ensure the
// credentials are only refreshed when needed. This also protects the
// credential provider to be used concurrently.
//
// Config providers used:
// * credentialsProviderProvider
//
// Returns true when an explicit provider was found and installed on cfg.
func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, cfgs configs) (bool, error) {
	credProvider, found, err := getCredentialsProvider(ctx, cfgs)
	if err != nil {
		return false, err
	}
	if !found {
		return false, nil
	}

	cfg.Credentials = wrapWithCredentialsCache(credProvider)

	return true, nil
}
+
// resolveCredentialChain resolves a credential provider chain using EnvConfig
// and SharedConfig if present in the slice of provided configs.
//
// The resolved CredentialProvider will be wrapped in a cache to ensure the
// credentials are only refreshed when needed. This also protects the
// credential provider to be used concurrently.
func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
	envConfig, sharedConfig, other := getAWSConfigSources(configs)

	// When checking if a profile was specified programmatically we should only consider the "other"
	// configuration sources that have been provided. This ensures we correctly honor the expected credential
	// hierarchy.
	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
	if err != nil {
		return err
	}

	// Precedence: explicit profile > env static creds > env web identity >
	// shared-config profile fallback. Case order here IS the precedence.
	switch {
	case sharedProfileSet:
		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
	case envConfig.Credentials.HasKeys():
		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
	case len(envConfig.WebIdentityTokenFilePath) > 0:
		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
	default:
		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
	}
	if err != nil {
		return err
	}

	// Wrap the resolved provider in a cache so the SDK will cache credentials.
	cfg.Credentials = wrapWithCredentialsCache(cfg.Credentials)

	return nil
}
+
// resolveCredsFromProfile resolves credentials from a shared-config profile.
// The switch cases below are ordered by credential precedence within a
// profile; reordering them changes behavior. Recurses via Source for chained
// profiles, and finally wraps with an assume-role provider when RoleARN is set.
func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {

	switch {
	case sharedConfig.Source != nil:
		// Assume IAM role with credentials source from a different profile.
		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)

	case sharedConfig.Credentials.HasKeys():
		// Static Credentials from Shared Config/Credentials file.
		cfg.Credentials = credentials.StaticCredentialsProvider{
			Value: sharedConfig.Credentials,
		}

	case sharedConfig.hasSSOConfiguration():
		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)

	case len(sharedConfig.CredentialProcess) != 0:
		// Get credentials from CredentialProcess
		err = processCredentials(ctx, cfg, sharedConfig, configs)

	case len(sharedConfig.CredentialSource) != 0:
		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)

	case len(sharedConfig.WebIdentityTokenFile) != 0:
		// Credentials from Assume Web Identity token require an IAM Role, and
		// that role will be assumed. May be wrapped with another assume role
		// via SourceProfile.
		err = assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)

	case len(envConfig.ContainerCredentialsEndpoint) != 0:
		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)

	case len(envConfig.ContainerCredentialsRelativePath) != 0:
		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)

	default:
		// Last resort: EC2 instance metadata role credentials.
		err = resolveEC2RoleCredentials(ctx, cfg, configs)
	}
	if err != nil {
		return err
	}

	if len(sharedConfig.RoleARN) > 0 {
		// Layer an assume-role provider on top of whatever was resolved above.
		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
	}

	return nil
}
+
// resolveSSOCredentials installs an AWS SSO credential provider built from the
// shared config's sso_* settings. The SSO client is built from a copy of cfg
// so forcing the SSO region does not mutate the caller's config.
func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
	if err := sharedConfig.validateSSOConfiguration(); err != nil {
		return err
	}

	var options []func(*ssocreds.Options)
	v, found, err := getSSOProviderOptions(ctx, configs)
	if err != nil {
		return err
	}
	if found {
		options = append(options, v)
	}

	cfgCopy := cfg.Copy()
	cfgCopy.Region = sharedConfig.SSORegion

	cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)

	return nil
}
+
+func ecsContainerURI(path string) string {
+ return fmt.Sprintf("%s%s", ecsContainerEndpoint, path)
+}
+
// processCredentials installs a credential_process provider that shells out to
// the command configured in the shared config, applying any externally
// supplied processcreds option function first.
func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
	var opts []func(*processcreds.Options)

	options, found, err := getProcessCredentialOptions(ctx, configs)
	if err != nil {
		return err
	}
	if found {
		opts = append(opts, options)
	}

	cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)

	return nil
}
+
+func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
+ var resolveErr error
+
+ parsed, err := url.Parse(endpointURL)
+ if err != nil {
+ resolveErr = fmt.Errorf("invalid URL, %w", err)
+ } else {
+ host := parsed.Hostname()
+ if len(host) == 0 {
+ resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+ } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+ resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr)
+ } else if !isLoopback {
+ resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host)
+ }
+ }
+
+ if resolveErr != nil {
+ return resolveErr
+ }
+
+ return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs)
+}
+
// resolveHTTPCredProvider installs an HTTP endpoint credential provider
// (container credentials). The base option function propagates cfg's
// APIOptions/Retryer and the optional auth token; any externally supplied
// option function is applied after it and may override those values.
func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error {
	optFns := []func(*endpointcreds.Options){
		func(options *endpointcreds.Options) {
			if len(authToken) != 0 {
				options.AuthorizationToken = authToken
			}
			options.APIOptions = cfg.APIOptions
			if cfg.Retryer != nil {
				options.Retryer = cfg.Retryer()
			}
		},
	}

	optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs)
	if err != nil {
		return err
	}
	if found {
		optFns = append(optFns, optFn)
	}

	provider := endpointcreds.New(url, optFns...)

	// Cache with a 5 minute expiry window so credentials refresh before expiry.
	cfg.Credentials = wrapWithCredentialsCache(provider, func(options *aws.CredentialsCacheOptions) {
		options.ExpiryWindow = 5 * time.Minute
	})

	return nil
}
+
// resolveCredsFromSource resolves credentials according to the shared config
// credential_source value (see the credSource* constants). Unknown values are
// an error.
func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
	switch sharedCfg.CredentialSource {
	case credSourceEc2Metadata:
		return resolveEC2RoleCredentials(ctx, cfg, configs)

	case credSourceEnvironment:
		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}

	case credSourceECSContainer:
		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
		}
		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)

	default:
		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
	}

	// Reached only via the Environment case above.
	return nil
}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+ optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+ optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ optFns = append(optFns, optFn)
+ }
+
+ optFns = append(optFns, func(o *ec2rolecreds.Options) {
+ // Only define a client from config if not already defined.
+ if o.Client != nil {
+ options := imds.Options{
+ HTTPClient: cfg.HTTPClient,
+ }
+ if cfg.Retryer != nil {
+ options.Retryer = cfg.Retryer()
+ }
+ o.Client = imds.New(options)
+ }
+ })
+
+ provider := ec2rolecreds.New(optFns...)
+
+ cfg.Credentials = wrapWithCredentialsCache(provider, func(options *aws.CredentialsCacheOptions) {
+ options.ExpiryWindow = 5 * time.Minute
+ })
+
+ return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+ var (
+ envConfig *EnvConfig
+ sharedConfig *SharedConfig
+ other configs
+ )
+
+ for i := range cfgs {
+ switch c := cfgs[i].(type) {
+ case EnvConfig:
+ if envConfig == nil {
+ envConfig = &c
+ }
+ case *EnvConfig:
+ if envConfig == nil {
+ envConfig = c
+ }
+ case SharedConfig:
+ if sharedConfig == nil {
+ sharedConfig = &c
+ }
+ case *SharedConfig:
+ if envConfig == nil {
+ sharedConfig = c
+ }
+ default:
+ other = append(other, c)
+ }
+ }
+
+ if envConfig == nil {
+ envConfig = &EnvConfig{}
+ }
+
+ if sharedConfig == nil {
+ sharedConfig = &SharedConfig{}
+ }
+
+ return envConfig, sharedConfig, other
+}
+
// AssumeRoleTokenProviderNotSetError is an error returned when creating a
// session when the MFAToken option is not set when shared config is configured
// to load and assume a role with an MFA token.
type AssumeRoleTokenProviderNotSetError struct{}

// Error is the error message.
func (e AssumeRoleTokenProviderNotSetError) Error() string {
	// Plain literal: fmt.Sprintf with a constant, verb-free format string is
	// flagged by go vet / staticcheck (S1039) and adds nothing.
	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
}
+
// assumeWebIdentity installs an STS web-identity role provider that exchanges
// the OIDC token in filepath for roleARN credentials. Both the token file
// path and role ARN are required. The base option sets the session name;
// externally supplied options are applied after and may override it.
func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error {
	if len(filepath) == 0 {
		return fmt.Errorf("token file path is not set")
	}

	if len(roleARN) == 0 {
		return fmt.Errorf("role ARN is not set")
	}

	optFns := []func(*stscreds.WebIdentityRoleOptions){
		func(options *stscreds.WebIdentityRoleOptions) {
			options.RoleSessionName = sessionName
		},
	}

	optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs)
	if err != nil {
		return err
	}
	if found {
		optFns = append(optFns, optFn)
	}

	provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...)

	// Note: not wrapped in a credentials cache here; resolveCredentialChain
	// wraps the final provider.
	cfg.Credentials = provider

	return nil
}
+
// credsFromAssumeRole layers an STS assume-role provider (for
// sharedCfg.RoleARN) on top of the credentials already resolved on cfg.
// Returns AssumeRoleTokenProviderNotSetError when an MFA serial is configured
// but no token provider was supplied.
func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
	optFns := []func(*stscreds.AssumeRoleOptions){
		func(options *stscreds.AssumeRoleOptions) {
			options.RoleSessionName = sharedCfg.RoleSessionName
			if sharedCfg.RoleDurationSeconds != nil {
				// Durations at or below the 15 minute STS minimum are left to
				// the SDK default.
				if *sharedCfg.RoleDurationSeconds/time.Minute > 15 {
					options.Duration = *sharedCfg.RoleDurationSeconds
				}
			}
			// Assume role with external ID
			if len(sharedCfg.ExternalID) > 0 {
				options.ExternalID = aws.String(sharedCfg.ExternalID)
			}

			// Assume role with MFA
			if len(sharedCfg.MFASerial) != 0 {
				options.SerialNumber = aws.String(sharedCfg.MFASerial)
			}
		},
	}

	optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs)
	if err != nil {
		return err
	}
	if found {
		optFns = append(optFns, optFn)
	}

	{
		// Synthesize options early to validate configuration errors sooner to ensure a token provider
		// is present if the SerialNumber was set.
		var o stscreds.AssumeRoleOptions
		for _, fn := range optFns {
			fn(&o)
		}
		if o.TokenProvider == nil && o.SerialNumber != nil {
			return AssumeRoleTokenProviderNotSetError{}
		}
	}

	cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)

	return nil
}
+
+// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache with the provided options if the provider is not already a aws.CredentialsCache.
+func wrapWithCredentialsCache(provider aws.CredentialsProvider, optFns ...func(options *aws.CredentialsCacheOptions)) aws.CredentialsProvider {
+ _, ok := provider.(*aws.CredentialsCache)
+ if ok {
+ return provider
+ }
+
+ return aws.NewCredentialsCache(provider, optFns...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
new file mode 100644
index 000000000..8c34a61b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -0,0 +1,1117 @@
+package config
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/ini"
+ "github.com/aws/smithy-go/logging"
+)
+
+const (
+ // Prefix to use for filtering profiles
+ profilePrefix = `profile `
+
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+ roleDurationSecondsKey = "duration_seconds" // optional
+
+ // AWS Single Sign-On (AWS SSO) group
+ ssoAccountIDKey = "sso_account_id"
+ ssoRegionKey = "sso_region"
+ ssoRoleNameKey = "sso_role_name"
+ ssoStartURL = "sso_start_url"
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+ // External Credential process
+ credentialProcessKey = `credential_process` // optional
+
+ // Web Identity Token File
+ webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+ // S3 ARN Region Usage
+ s3UseARNRegionKey = "s3_use_arn_region"
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+ return filepath.Join(userHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+ return filepath.Join(userHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+ DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+ DefaultSharedCredentialsFilename(),
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+ Profile string
+
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Credentials aws.Credentials
+
+ CredentialSource string
+ CredentialProcess string
+ WebIdentityTokenFile string
+
+ SSOAccountID string
+ SSORegion string
+ SSORoleName string
+ SSOStartURL string
+
+ RoleARN string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+ RoleDurationSeconds *time.Duration
+
+ SourceProfileName string
+ Source *SharedConfig
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region
+ Region string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // s3_use_arn_region=true
+ S3UseARNRegion *bool
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+ if c.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+
+ return *c.S3UseARNRegion, true, nil
+}
+
+// GetRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+ if len(c.Region) == 0 {
+ return "", false, nil
+ }
+ return c.Region, true, nil
+}
+
+// GetCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+ return c.Credentials, true, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+ cfg, err := loadSharedConfig(ctx, configs)
+ if err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistError); ok {
+ return SharedConfig{}, nil
+ }
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+ var profile string
+ var configFiles []string
+ var credentialsFiles []string
+ var ok bool
+ var err error
+
+ profile, ok, err = getSharedConfigProfile(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ profile = defaultSharedConfigProfile
+ }
+
+ configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+
+ credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+
+	// set up logger if the log configuration warning is set
+ var logger logging.Logger
+ logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+ if found && logWarnings {
+ logger, found, err = getLogger(ctx, configs)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+ if !found {
+ logger = logging.NewStandardLogger(os.Stderr)
+ }
+ }
+
+ return LoadSharedConfigProfile(ctx, profile,
+ func(o *LoadSharedConfigOptions) {
+ o.Logger = logger
+ o.ConfigFiles = configFiles
+ o.CredentialsFiles = credentialsFiles
+ },
+ )
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+ // CredentialsFiles are the shared credentials files
+ CredentialsFiles []string
+
+ // ConfigFiles are the shared config files
+ ConfigFiles []string
+
+ // Logger is the logger used to log shared config behavior
+ Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present.
+// No default files are set, if files set to an empty slice.
+//
+// You can read more about shared config and credentials file location at
+// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location
+//
+func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) {
+ var option LoadSharedConfigOptions
+ for _, fn := range optFns {
+ fn(&option)
+ }
+
+ if option.ConfigFiles == nil {
+ option.ConfigFiles = DefaultSharedConfigFiles
+ }
+
+ if option.CredentialsFiles == nil {
+ option.CredentialsFiles = DefaultSharedCredentialsFiles
+ }
+
+ // load shared configuration sections from shared configuration INI options
+ configSections, err := loadIniFiles(option.ConfigFiles)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // check for profile prefix and drop duplicates or invalid profiles
+ err = processConfigSections(ctx, configSections, option.Logger)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // load shared credentials sections from shared credentials INI options
+ credentialsSections, err := loadIniFiles(option.CredentialsFiles)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // check for profile prefix and drop duplicates or invalid profiles
+ err = processCredentialsSections(ctx, credentialsSections, option.Logger)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ err = mergeSections(configSections, credentialsSections)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // profile should be lower-cased to standardize
+ profile = strings.ToLower(profile)
+
+ cfg := SharedConfig{}
+ profiles := map[string]struct{}{}
+ if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil {
+ return SharedConfig{}, err
+ }
+
+ return cfg, nil
+}
+
+func processConfigSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
+ for _, section := range sections.List() {
+ // drop profiles without prefix for config files
+ if !strings.HasPrefix(section, profilePrefix) && !strings.EqualFold(section, "default") {
+ // drop this section, as invalid profile name
+ sections.DeleteSection(section)
+
+ if logger != nil {
+ logger.Logf(logging.Debug,
+ "A profile defined with name `%v` is ignored. For use within a shared configuration file, "+
+ "a non-default profile must have `profile ` prefixed to the profile name.\n",
+ section,
+ )
+ }
+ }
+ }
+
+ // rename sections to remove `profile ` prefixing to match with credentials file.
+ // if default is already present, it will be dropped.
+ for _, section := range sections.List() {
+ if strings.HasPrefix(section, profilePrefix) {
+ v, ok := sections.GetSection(section)
+ if !ok {
+ return fmt.Errorf("error processing profiles within the shared configuration files")
+ }
+
+ // delete section with profile as prefix
+ sections.DeleteSection(section)
+
+ // set the value to non-prefixed name in sections.
+ section = strings.TrimPrefix(section, profilePrefix)
+ if sections.HasSection(section) {
+ oldSection, _ := sections.GetSection(section)
+ v.Logs = append(v.Logs,
+ fmt.Sprintf("A default profile prefixed with `profile ` found in %s, "+
+ "overrided non-prefixed default profile from %s", v.SourceFile, oldSection.SourceFile))
+ }
+
+ // assign non-prefixed name to section
+ v.Name = section
+ sections.SetSection(section, v)
+ }
+ }
+ return nil
+}
+
+func processCredentialsSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
+ for _, section := range sections.List() {
+ // drop profiles with prefix for credential files
+ if strings.HasPrefix(section, profilePrefix) {
+ // drop this section, as invalid profile name
+ sections.DeleteSection(section)
+
+ if logger != nil {
+ logger.Logf(logging.Debug,
+ "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+ "for the shared credentials file.\n",
+ section,
+ )
+ }
+ }
+ }
+ return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+ mergedSections := ini.NewSections()
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ var v *ini.UnableToReadFile
+ if ok := errors.As(err, &v); ok {
+ // Skip files which can't be opened and read for whatever reason.
+ // We treat such files as empty, and do not fall back to other locations.
+ continue
+ } else if err != nil {
+ return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ // mergeSections into mergedSections
+ err = mergeSections(mergedSections, sections)
+ if err != nil {
+ return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+ }
+
+ return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst, src ini.Sections) error {
+ for _, sectionName := range src.List() {
+ srcSection, _ := src.GetSection(sectionName)
+
+ if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+ (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+ srcSection.Errors = append(srcSection.Errors,
+ fmt.Errorf("partial credentials found for profile %v", sectionName))
+ }
+
+ if !dst.HasSection(sectionName) {
+ dst.SetSection(sectionName, srcSection)
+ continue
+ }
+
+ // merge with destination srcSection
+ dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden if any
+ dstSection.Errors = srcSection.Errors
+
+ // Access key id update
+ if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+ accessKey := srcSection.String(accessKeyIDKey)
+ secretKey := srcSection.String(secretAccessKey)
+
+ if dstSection.Has(accessKeyIDKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding credentials value for aws access key id, "+
+ "and aws secret access key, defined in %v, with values found in a duplicate profile "+
+ "defined at file %v. \n",
+ sectionName, dstSection.SourceFile[accessKeyIDKey],
+ srcSection.SourceFile[accessKeyIDKey]))
+ }
+
+ // update access key
+ v, err := ini.NewStringValue(accessKey)
+ if err != nil {
+ return fmt.Errorf("error merging access key, %w", err)
+ }
+ dstSection.UpdateValue(accessKeyIDKey, v)
+
+ // update secret key
+ v, err = ini.NewStringValue(secretKey)
+ if err != nil {
+ return fmt.Errorf("error merging secret key, %w", err)
+ }
+ dstSection.UpdateValue(secretAccessKey, v)
+
+ // update session token
+ if srcSection.Has(sessionTokenKey) {
+ sessionKey := srcSection.String(sessionTokenKey)
+
+ val, e := ini.NewStringValue(sessionKey)
+ if e != nil {
+ return fmt.Errorf("error merging session key, %w", e)
+ }
+
+ if dstSection.Has(sessionTokenKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, sessionTokenKey, dstSection.SourceFile[sessionTokenKey],
+ sessionTokenKey, srcSection.SourceFile[sessionTokenKey]))
+ }
+
+ dstSection.UpdateValue(sessionTokenKey, val)
+ dstSection.UpdateSourceFile(sessionTokenKey, srcSection.SourceFile[sessionTokenKey])
+ }
+
+ // update source file to reflect where the static creds came from
+ dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+ dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+ }
+
+ if srcSection.Has(roleArnKey) {
+ key := srcSection.String(roleArnKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging roleArnKey, %w", err)
+ }
+
+ if dstSection.Has(roleArnKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, roleArnKey, dstSection.SourceFile[roleArnKey],
+ roleArnKey, srcSection.SourceFile[roleArnKey]))
+ }
+
+ dstSection.UpdateValue(roleArnKey, val)
+ dstSection.UpdateSourceFile(roleArnKey, srcSection.SourceFile[roleArnKey])
+ }
+
+ if srcSection.Has(sourceProfileKey) {
+ key := srcSection.String(sourceProfileKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging sourceProfileKey, %w", err)
+ }
+
+ if dstSection.Has(sourceProfileKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, sourceProfileKey, dstSection.SourceFile[sourceProfileKey],
+ sourceProfileKey, srcSection.SourceFile[sourceProfileKey]))
+ }
+
+ dstSection.UpdateValue(sourceProfileKey, val)
+ dstSection.UpdateSourceFile(sourceProfileKey, srcSection.SourceFile[sourceProfileKey])
+ }
+
+ if srcSection.Has(credentialSourceKey) {
+ key := srcSection.String(credentialSourceKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging credentialSourceKey, %w", err)
+ }
+
+ if dstSection.Has(credentialSourceKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, credentialSourceKey, dstSection.SourceFile[credentialSourceKey],
+ credentialSourceKey, srcSection.SourceFile[credentialSourceKey]))
+ }
+
+ dstSection.UpdateValue(credentialSourceKey, val)
+ dstSection.UpdateSourceFile(credentialSourceKey, srcSection.SourceFile[credentialSourceKey])
+ }
+
+ if srcSection.Has(externalIDKey) {
+ key := srcSection.String(externalIDKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging externalIDKey, %w", err)
+ }
+
+ if dstSection.Has(externalIDKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, externalIDKey, dstSection.SourceFile[externalIDKey],
+ externalIDKey, srcSection.SourceFile[externalIDKey]))
+ }
+
+ dstSection.UpdateValue(externalIDKey, val)
+ dstSection.UpdateSourceFile(externalIDKey, srcSection.SourceFile[externalIDKey])
+ }
+
+ if srcSection.Has(mfaSerialKey) {
+ key := srcSection.String(mfaSerialKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging mfaSerialKey, %w", err)
+ }
+
+ if dstSection.Has(mfaSerialKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, mfaSerialKey, dstSection.SourceFile[mfaSerialKey],
+ mfaSerialKey, srcSection.SourceFile[mfaSerialKey]))
+ }
+
+ dstSection.UpdateValue(mfaSerialKey, val)
+ dstSection.UpdateSourceFile(mfaSerialKey, srcSection.SourceFile[mfaSerialKey])
+ }
+
+ if srcSection.Has(roleSessionNameKey) {
+ key := srcSection.String(roleSessionNameKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging roleSessionNameKey, %w", err)
+ }
+
+ if dstSection.Has(roleSessionNameKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, roleSessionNameKey, dstSection.SourceFile[roleSessionNameKey],
+ roleSessionNameKey, srcSection.SourceFile[roleSessionNameKey]))
+ }
+
+ dstSection.UpdateValue(roleSessionNameKey, val)
+ dstSection.UpdateSourceFile(roleSessionNameKey, srcSection.SourceFile[roleSessionNameKey])
+ }
+
+ // role duration seconds key update
+ if srcSection.Has(roleDurationSecondsKey) {
+ roleDurationSeconds := srcSection.Int(roleDurationSecondsKey)
+ v, err := ini.NewIntValue(roleDurationSeconds)
+ if err != nil {
+ return fmt.Errorf("error merging role duration seconds key, %w", err)
+ }
+ dstSection.UpdateValue(roleDurationSecondsKey, v)
+
+ dstSection.UpdateSourceFile(roleDurationSecondsKey, srcSection.SourceFile[roleDurationSecondsKey])
+ }
+
+ if srcSection.Has(regionKey) {
+ key := srcSection.String(regionKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging regionKey, %w", err)
+ }
+
+ if dstSection.Has(regionKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, regionKey, dstSection.SourceFile[regionKey],
+ regionKey, srcSection.SourceFile[regionKey]))
+ }
+
+ dstSection.UpdateValue(regionKey, val)
+ dstSection.UpdateSourceFile(regionKey, srcSection.SourceFile[regionKey])
+ }
+
+ if srcSection.Has(enableEndpointDiscoveryKey) {
+ key := srcSection.String(enableEndpointDiscoveryKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging enableEndpointDiscoveryKey, %w", err)
+ }
+
+ if dstSection.Has(enableEndpointDiscoveryKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, enableEndpointDiscoveryKey, dstSection.SourceFile[enableEndpointDiscoveryKey],
+ enableEndpointDiscoveryKey, srcSection.SourceFile[enableEndpointDiscoveryKey]))
+ }
+
+ dstSection.UpdateValue(enableEndpointDiscoveryKey, val)
+ dstSection.UpdateSourceFile(enableEndpointDiscoveryKey, srcSection.SourceFile[enableEndpointDiscoveryKey])
+ }
+
+ if srcSection.Has(credentialProcessKey) {
+ key := srcSection.String(credentialProcessKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging credentialProcessKey, %w", err)
+ }
+
+ if dstSection.Has(credentialProcessKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, credentialProcessKey, dstSection.SourceFile[credentialProcessKey],
+ credentialProcessKey, srcSection.SourceFile[credentialProcessKey]))
+ }
+
+ dstSection.UpdateValue(credentialProcessKey, val)
+ dstSection.UpdateSourceFile(credentialProcessKey, srcSection.SourceFile[credentialProcessKey])
+ }
+
+ if srcSection.Has(webIdentityTokenFileKey) {
+ key := srcSection.String(webIdentityTokenFileKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging webIdentityTokenFileKey, %w", err)
+ }
+
+ if dstSection.Has(webIdentityTokenFileKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, webIdentityTokenFileKey, dstSection.SourceFile[webIdentityTokenFileKey],
+ webIdentityTokenFileKey, srcSection.SourceFile[webIdentityTokenFileKey]))
+ }
+
+ dstSection.UpdateValue(webIdentityTokenFileKey, val)
+ dstSection.UpdateSourceFile(webIdentityTokenFileKey, srcSection.SourceFile[webIdentityTokenFileKey])
+ }
+
+ if srcSection.Has(s3UseARNRegionKey) {
+ key := srcSection.String(s3UseARNRegionKey)
+ val, err := ini.NewStringValue(key)
+ if err != nil {
+ return fmt.Errorf("error merging s3UseARNRegionKey, %w", err)
+ }
+
+ if dstSection.Has(s3UseARNRegionKey) {
+ dstSection.Logs = append(dstSection.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, s3UseARNRegionKey, dstSection.SourceFile[s3UseARNRegionKey],
+ s3UseARNRegionKey, srcSection.SourceFile[s3UseARNRegionKey]))
+ }
+
+ dstSection.UpdateValue(s3UseARNRegionKey, val)
+ dstSection.UpdateSourceFile(s3UseARNRegionKey, srcSection.SourceFile[s3UseARNRegionKey])
+ }
+
+ // set srcSection on dst srcSection
+ dst = dst.SetSection(sectionName, dstSection)
+ }
+
+ return nil
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+ sections ini.Sections, logger logging.Logger) error {
+ c.Profile = profile
+
+ section, ok := sections.GetSection(profile)
+ if !ok {
+ return SharedConfigProfileNotExistError{
+ Profile: profile,
+ }
+ }
+
+ // if logs are appended to the section, log them
+ if section.Logs != nil && logger != nil {
+ for _, log := range section.Logs {
+ logger.Logf(logging.Debug, log)
+ }
+ }
+
+ // set config from the provided ini section
+ err := c.setFromIniSection(profile, section)
+ if err != nil {
+ return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+ }
+
+ if _, ok := profiles[profile]; ok {
+ // if this is the second instance of the profile the Assume Role
+ // options must be cleared because they are only valid for the
+ // first reference of a profile. The self linked instance of the
+ // profile only have credential provider options.
+ c.clearAssumeRoleOptions()
+ } else {
+ // First time a profile has been seen, It must either be a assume role
+ // credentials, or SSO. Assert if the credential type requires a role ARN,
+ // the ARN is also set, or validate that the SSO configuration is complete.
+ if err := c.validateCredentialsConfig(profile); err != nil {
+ return err
+ }
+ }
+
+ // if not top level profile and has credentials, return with credentials.
+ if len(profiles) != 0 && c.Credentials.HasKeys() {
+ return nil
+ }
+
+ profiles[profile] = struct{}{}
+
+ // validate no colliding credentials type are present
+ if err := c.validateCredentialType(); err != nil {
+ return err
+ }
+
+ // Link source profiles for assume roles
+ if len(c.SourceProfileName) != 0 {
+ // Linked profile via source_profile ignore credential provider
+ // options, the source profile must provide the credentials.
+ c.clearCredentialOptions()
+
+ srcCfg := &SharedConfig{}
+ err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger)
+ if err != nil {
+ // SourceProfileName that doesn't exist is an error in configuration.
+ if _, ok := err.(SharedConfigProfileNotExistError); ok {
+ err = SharedConfigAssumeRoleError{
+ RoleARN: c.RoleARN,
+ Profile: c.SourceProfileName,
+ Err: err,
+ }
+ }
+ return err
+ }
+
+ if !srcCfg.hasCredentials() {
+ return SharedConfigAssumeRoleError{
+ RoleARN: c.RoleARN,
+ Profile: c.SourceProfileName,
+ }
+ }
+
+ c.Source = srcCfg
+ }
+
+ return nil
+}
+
+// setFromIniSection loads the configuration from the profile section defined in
+// the provided ini file. A SharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For example
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+ if len(section.Name) == 0 {
+ sources := make([]string, 0)
+ for _, v := range section.SourceFile {
+ sources = append(sources, v)
+ }
+
+ return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources)
+ }
+
+ if len(section.Errors) != 0 {
+ var errStatement string
+ for i, e := range section.Errors {
+ errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error())
+ }
+ return fmt.Errorf("Error using profile: \n %v", errStatement)
+ }
+
+ // Assume Role
+ updateString(&c.RoleARN, section, roleArnKey)
+ updateString(&c.ExternalID, section, externalIDKey)
+ updateString(&c.MFASerial, section, mfaSerialKey)
+ updateString(&c.RoleSessionName, section, roleSessionNameKey)
+ updateString(&c.SourceProfileName, section, sourceProfileKey)
+ updateString(&c.CredentialSource, section, credentialSourceKey)
+ updateString(&c.Region, section, regionKey)
+
+ // AWS Single Sign-On (AWS SSO)
+ updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+ updateString(&c.SSORegion, section, ssoRegionKey)
+ updateString(&c.SSORoleName, section, ssoRoleNameKey)
+ updateString(&c.SSOStartURL, section, ssoStartURL)
+
+ if section.Has(roleDurationSecondsKey) {
+ d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+ c.RoleDurationSeconds = &d
+ }
+
+ updateString(&c.CredentialProcess, section, credentialProcessKey)
+ updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+ updateBoolPtr(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+ updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+
+ // Shared Credentials
+ creds := aws.Credentials{
+ AccessKeyID: section.String(accessKeyIDKey),
+ SecretAccessKey: section.String(secretAccessKey),
+ SessionToken: section.String(sessionTokenKey),
+ Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+ }
+
+ if creds.HasKeys() {
+ c.Credentials = creds
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+ if err := c.validateCredentialsRequireARN(profile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+ var credSource string
+
+ switch {
+ case len(c.SourceProfileName) != 0:
+ credSource = sourceProfileKey
+ case len(c.CredentialSource) != 0:
+ credSource = credentialSourceKey
+ case len(c.WebIdentityTokenFile) != 0:
+ credSource = webIdentityTokenFileKey
+ }
+
+ if len(credSource) != 0 && len(c.RoleARN) == 0 {
+ return CredentialRequiresARNError{
+ Type: credSource,
+ Profile: profile,
+ }
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+ // Only one or no credential type can be defined.
+ if !oneOrNone(
+ len(c.SourceProfileName) != 0,
+ len(c.CredentialSource) != 0,
+ len(c.CredentialProcess) != 0,
+ len(c.WebIdentityTokenFile) != 0,
+ c.hasSSOConfiguration(),
+ ) {
+ return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso")
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+ if !c.hasSSOConfiguration() {
+ return nil
+ }
+
+ var missing []string
+ if len(c.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(c.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
+ }
+
+ if len(c.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURL)
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+ switch {
+ case len(c.SourceProfileName) != 0:
+ case len(c.CredentialSource) != 0:
+ case len(c.CredentialProcess) != 0:
+ case len(c.WebIdentityTokenFile) != 0:
+ case c.hasSSOConfiguration():
+ case c.Credentials.HasKeys():
+ default:
+ return false
+ }
+
+ return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+ switch {
+ case len(c.SSOAccountID) != 0:
+ case len(c.SSORegion) != 0:
+ case len(c.SSORoleName) != 0:
+ case len(c.SSOStartURL) != 0:
+ default:
+ return false
+ }
+ return true
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+ c.RoleARN = ""
+ c.ExternalID = ""
+ c.MFASerial = ""
+ c.RoleSessionName = ""
+ c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+ c.CredentialSource = ""
+ c.CredentialProcess = ""
+ c.WebIdentityTokenFile = ""
+ c.Credentials = aws.Credentials{}
+}
+
+// SharedConfigLoadError is an error for the shared config file failed to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+ return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+ Filename []string
+ Profile string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+ return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ Profile string
+ RoleARN string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+ return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+ e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+ // type of credentials that were configured.
+ Type string
+
+ // Profile name the credentials were in.
+ Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+ return fmt.Sprintf(
+ "credential type %s requires role_arn, profile %s",
+ e.Type, e.Profile,
+ )
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
+
+func oneOrNone(bs ...bool) bool {
+ var count int
+
+ for _, b := range bs {
+ if b {
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// updateString will only update the dst with the value in the section key, key
+// is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section key, key
+// is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = new(bool)
+ **dst = section.Bool(key)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 000000000..f6e2873ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 000000000..0bec6526b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider int he credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
+// role for credentials, you specify a `credentials_source` property in the config
+// profile the SDK will load.
+//
+// [default]
+// credential_source = Ec2InstanceMetadata
+//
+// Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+// provider := imds.New(imds.Options{})
+//
+// // Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+// Credentials: &aws.CredentialsCache{Provider: provider},
+// })
+//
+// If you need more control, you can set the configuration options on the
+// credentials provider using the imds.Options type to configure the EC2 IMDS
+// API Client and ExpiryWindow of the retrieved credentials.
+//
+// provider := imds.New(imds.Options{
+// // See imds.Options type's documentation for more options available.
+// Client: imds.New(Options{
+// HTTPClient: customHTTPClient,
+// }),
+//
+// // Modify how soon credentials expire prior to their original expiry time.
+// ExpiryWindow: 5 * time.Minute,
+// })
+//
+// EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 000000000..901132a32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,174 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/smithy-go"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+ GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// The New function must be used to create the Provider.
+//
+// p := &ec2rolecreds.New(ec2rolecreds.Options{
+// Client: imds.New(imds.Options{}),
+//
+// // Expire the credentials 10 minutes before IAM states they should.
+// // Proactively refreshing the credentials.
+// ExpiryWindow: 10 * time.Minute
+// })
+type Provider struct {
+ options Options
+}
+
+// Options is a list of user settable options for setting the behavior of the Provider.
+type Options struct {
+ // The API client that will be used by the provider to make GetMetadata API
+ // calls to EC2 IMDS.
+ //
+ // If nil, the provider will default to the EC2 IMDS client.
+ Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+ options := Options{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.Client == nil {
+ options.Client = imds.New(imds.Options{})
+ }
+
+ return &Provider{
+ options: options,
+ }
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ credsList, err := requestCredList(ctx, p.options.Client)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return aws.Credentials{Source: ProviderName},
+ fmt.Errorf("unexpected empty EC2 IMDS role list")
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: roleCreds.Expiration,
+ }
+
+ return creds, nil
+}
+
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+ resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+ Path: iamSecurityCredsPath,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+ }
+ defer resp.Content.Close()
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Content)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+// and error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+ Path: path.Join(iamSecurityCredsPath, credsName),
+ })
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+ credsName, err)
+ }
+ defer resp.Content.Close()
+
+ var respCreds ec2RoleCredRespBody
+ if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+ credsName, err)
+ }
+
+ if !strings.EqualFold(respCreds.Code, "Success") {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+ credsName,
+ &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 000000000..60b8298f8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,148 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go"
+ smithymiddleware "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifer
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the endpoint client configurable options
+type Options struct {
+ // The endpoint to retrieve credentials from
+ Endpoint string
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // Set of options to modify how the credentials operation is invoked.
+ APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ return to
+}
+
+// Client is an client for retrieving AWS credentials from an endpoint
+type Client struct {
+ options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ if options.HTTPClient == nil {
+ options.HTTPClient = awshttp.NewBuildableClient()
+ }
+
+ if options.Retryer == nil {
+ options.Retryer = retry.NewStandard()
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+// GetCredentialsInput is the input to send with the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+ AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+ stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+ stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+ stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+ retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+ middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+ smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+ smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, err
+ }
+ }
+
+ handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, _, err := handler.Handle(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Fault smithy.ErrorFault `json:"-"`
+}
+
+// Error is the error mesage string
+func (e *EndpointError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+ return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+ return e.Message
+}
+
+// ErrorFault indicates error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+ return e.Fault
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 000000000..050f09979
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ smithymiddleware "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "github.com/aws/smithy-go"
+)
+
+type buildEndpoint struct {
+ Endpoint string
+}
+
+func (b *buildEndpoint) ID() string {
+ return "BuildEndpoint"
+}
+
+func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) (
+ out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport, %T", in.Request)
+ }
+
+ if len(b.Endpoint) == 0 {
+ return out, metadata, fmt.Errorf("endpoint not provided")
+ }
+
+ parsed, err := url.Parse(b.Endpoint)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err)
+ }
+
+ request.URL = parsed
+
+ return next.HandleBuild(ctx, in)
+}
+
+type serializeOpGetCredential struct{}
+
+func (s *serializeOpGetCredential) ID() string {
+ return "OperationSerializer"
+}
+
+func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) (
+ out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request)
+ }
+
+ params, ok := in.Parameters.(*GetCredentialsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters)
+ }
+
+ const acceptHeader = "Accept"
+ request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json")
+
+ if len(params.AuthorizationToken) > 0 {
+ const authHeader = "Authorization"
+ request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type deserializeOpGetCredential struct{}
+
+func (d *deserializeOpGetCredential) ID() string {
+ return "OperationDeserializer"
+}
+
+func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) (
+ out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, deserializeError(response)
+ }
+
+ var shape *GetCredentialsOutput
+ if err = json.NewDecoder(response.Body).Decode(&shape); err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)}
+ }
+
+ out.Result = shape
+ return out, metadata, err
+}
+
+func deserializeError(response *smithyhttp.Response) error {
+ var errShape *EndpointError
+ err := json.NewDecoder(response.Body).Decode(&errShape)
+ if err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)}
+ }
+
+ if response.StatusCode >= 500 {
+ errShape.Fault = smithy.FaultServer
+ } else {
+ errShape.Fault = smithy.FaultClient
+ }
+
+ return errShape
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..40cd7addb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
@@ -0,0 +1,133 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+ GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ // The AWS Client to make HTTP requests to the endpoint with. The endpoint
+ // the request will be made to is provided by the aws.Config's
+ // EndpointResolver.
+ client getCredentialsAPIClient
+
+ options Options
+}
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Options is structure of configurable options for Provider
+type Options struct {
+ // Endpoint to retrieve credentials from. Required
+ Endpoint string
+
+ // HTTPClient to handle sending HTTP requests to the target endpoint.
+ HTTPClient HTTPClient
+
+ // Set of options to modify how the credentials operation is invoked.
+ APIOptions []func(*middleware.Stack) error
+
+ // The Retryer to be used for determining whether a failed requested should be retried
+ Retryer aws.Retryer
+
+ // Optional authorization token value if set will be used as the value of
+ // the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+ o := Options{
+ Endpoint: endpoint,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ p := &Provider{
+ client: client.New(client.Options{
+ HTTPClient: o.HTTPClient,
+ Endpoint: o.Endpoint,
+ APIOptions: o.APIOptions,
+ Retryer: o.Retryer,
+ }),
+ options: o,
+ }
+
+ return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. And error will be returned if the retrieval fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ resp, err := p.getCredentials(ctx)
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ Source: ProviderName,
+ }
+
+ if resp.Expiration != nil {
+ creds.CanExpire = true
+ creds.Expires = *resp.Expiration
+ }
+
+ return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+ return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.mod
new file mode 100644
index 000000000..ffbe291fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.mod
@@ -0,0 +1,23 @@
+module github.com/aws/aws-sdk-go-v2/credentials
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.1.2
+ github.com/aws/aws-sdk-go-v2/service/sts v1.1.2
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace (
+ github.com/aws/aws-sdk-go-v2 => ../
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds => ../feature/ec2/imds/
+ github.com/aws/aws-sdk-go-v2/service/sts => ../service/sts/
+)
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => ../service/internal/presigned-url/
+
+replace github.com/aws/aws-sdk-go-v2/service/sso => ../service/sso/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 000000000..b028bea42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from an
+// external CLI invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// Loading credentials with the SDKs AWS Config
+//
+// You can use credentials from a AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to setup your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+// [default]
+// credential_process = /command/to/call
+//
+// Loading configuration using external will use the credential process to
+// retrieve credentials. NOTE: If there are credentials in the profile you are
+// using, the credential process will not be used.
+//
+// // Initialize a session to load credentials.
+// cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+// // Create S3 service client to use the credentials.
+// svc := s3.NewFromConfig(cfg)
+//
+// Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and provide it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+// // Create credentials using the Provider.
+// provider := processcreds.NewProvider("/path/to/command")
+//
+// // Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+// Credentials: &aws.CredentialsCache{Provider: provider},
+// })
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+// provider := processcreds.NewProvider("/path/to/command",
+// func(o *processcreds.Options) {
+// // Override the provider's default timeout
+// o.Timeout = 2 * time.Minute
+// })
+//
+// You can also use your own `exec.Cmd` value by providing a value that satisfies
+// the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor.
+//
+// // Create an exec.Cmd
+// cmdBuilder := processcreds.NewCommandBuilderFunc(
+// func(ctx context.Context) (*exec.Cmd, error) {
+// cmd := exec.CommandContext(ctx,
+// "customCLICommand",
+// "-a", "argument",
+// )
+// cmd.Env = []string{
+// "ENV_VAR_FOO=value",
+// "ENV_VAR_BAR=other_value",
+// }
+//
+// return cmd, nil
+// },
+// )
+//
+// // Create credentials using your exec.Cmd and custom timeout
+// provider := processcreds.NewProviderCommand(cmdBuilder,
+// func(opt *processcreds.Provider) {
+// // optionally override the provider's default timeout
+// opt.Timeout = 1 * time.Second
+// })
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 000000000..3921da34c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,269 @@
+package processcreds
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+ Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+ return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+ return e.Err
+}
+
+// Provider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+ // Provides a constructor for exec.Cmd that are invoked by the provider for
+ // retrieving credentials. Use this to provide custom creation of exec.Cmd
+ // with things like environment variables, or other configuration.
+ //
+ // The provider defaults to the DefaultNewCommand function.
+ commandBuilder NewCommandBuilder
+
+ options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how command will be
+// created that the Provider will use to retrieve credentials with.
+type NewCommandBuilder interface {
+ NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+ return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+ Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process environment variables,
+// stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(b.Args) == 0 {
+ return nil, &ProviderError{
+ Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+ }
+ }
+
+ cmdArgs = append(cmdArgs, b.Args...)
+ cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+ cmd.Env = os.Environ()
+
+ cmd.Stderr = os.Stderr // display stderr on console for MFA
+ cmd.Stdin = os.Stdin // enable stdin for MFA
+
+ return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Credentials object wrapping the
+// Provider.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating command
+// the Provider will use to retrieve credentials with.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+ var args []string
+
+ // Ensure that the command arguments are not set if the provided command is
+ // empty. This will error out when the command is executed since no
+ // arguments are specified.
+ if len(command) > 0 {
+ args = []string{command}
+ }
+
+ commanBuilder := DefaultNewCommandBuilder{
+ Args: args,
+ }
+ return NewProviderCommand(commanBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Credentials object with the
+// specified command, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+ p := &Provider{
+ commandBuilder: builder,
+ options: Options{
+ Timeout: DefaultTimeout,
+ },
+ }
+
+ for _, option := range options {
+ option(&p.options)
+ }
+
+ return p
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or error if the command fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ out, err := p.executeCredentialProcess(ctx)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),
+ }
+ }
+
+ if resp.Version != 1 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("wrong version in process output (not 1)"),
+ }
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("missing AccessKeyId in process output"),
+ }
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("missing SecretAccessKey in process output"),
+ }
+ }
+
+ creds := aws.Credentials{
+ Source: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }
+
+ // Handle expiration
+ if resp.Expiration != nil {
+ creds.CanExpire = true
+ creds.Expires = *resp.Expiration
+ }
+
+ return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+ if p.options.Timeout >= 0 {
+ var cancelFunc func()
+ ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+ defer cancelFunc()
+ }
+
+ cmd, err := p.commandBuilder.NewCommand(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // get creds json on process's stdout
+ output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+ if cmd.Stdout != nil {
+ cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+ } else {
+ cmd.Stdout = output
+ }
+
+ execCh := make(chan error, 1)
+ go executeCommand(cmd, execCh)
+
+ select {
+ case execError := <-execCh:
+ if execError == nil {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return output.Bytes(), &ProviderError{
+ Err: fmt.Errorf("credential process timed out: %w", execError),
+ }
+ default:
+ return output.Bytes(), &ProviderError{
+ Err: fmt.Errorf("error in credential_process: %w", execError),
+ }
+ }
+ }
+
+ out := output.Bytes()
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+ }
+
+ return out, nil
+}
+
+func executeCommand(cmd *exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
new file mode 100644
index 000000000..2f396c0a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -0,0 +1,63 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+// sso_account_id
+// sso_region
+// sso_role_name
+// sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+// [profile devsso]
+// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_role_name = SSOReadOnlyRole
+// sso_region = us-east-1
+// sso_account_id = 123456789012
+//
+// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+// if err != nil {
+// return err
+// }
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+//
+// client := sso.NewFromConfig(cfg)
+//
+// var provider aws.CredentialsProvider
+// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start")
+//
+// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
+// provider = aws.NewCredentialsCache(provider)
+//
+// credentials, err := provider.Retrieve(context.TODO())
+// if err != nil {
+// return err
+// }
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you are programmatically constructing the
+// provider directly. This prevents your application from accessing the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go
new file mode 100644
index 000000000..ceca7dcee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go
new file mode 100644
index 000000000..eb48f61e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go
@@ -0,0 +1,7 @@
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("USERPROFILE")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go
new file mode 100644
index 000000000..279df7a13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go
@@ -0,0 +1,184 @@
+package ssocreds
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of credentials.
+const ProviderName = "SSOProvider"
+
+var defaultCacheLocation func() string
+
+func defaultCacheLocationImpl() string {
+ return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
+}
+
+func init() {
+ defaultCacheLocation = defaultCacheLocationImpl
+}
+
+// GetRoleCredentialsAPIClient is a API client that implements the GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+ GetRoleCredentials(ctx context.Context, params *sso.GetRoleCredentialsInput, optFns ...func(*sso.Options)) (*sso.GetRoleCredentialsOutput, error)
+}
+
+// Options is the Provider options structure.
+type Options struct {
+ // The Client which is configured for the AWS Region where the AWS SSO user portal is located.
+ Client GetRoleCredentialsAPIClient
+
+ // The AWS account that is assigned to the user.
+ AccountID string
+
+ // The role name that is assigned to the user.
+ RoleName string
+
+ // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
+ StartURL string
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
+type Provider struct {
+ options Options
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+ options := Options{
+ Client: client,
+ AccountID: accountID,
+ RoleName: roleName,
+ StartURL: startURL,
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Provider{
+ options: options,
+ }
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ tokenFile, err := loadTokenFile(p.options.StartURL)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+ AccessToken: &tokenFile.AccessToken,
+ AccountId: &p.options.AccountID,
+ RoleName: &p.options.RoleName,
+ })
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ return aws.Credentials{
+ AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId),
+ SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+ SessionToken: aws.ToString(output.RoleCredentials.SessionToken),
+ Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+ CanExpire: true,
+ Source: ProviderName,
+ }, nil
+}
+
+func getCacheFileName(url string) (string, error) {
+ hash := sha1.New()
+ _, err := hash.Write([]byte(url))
+ if err != nil {
+ return "", err
+ }
+ return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
+}
+
+type rfc3339 time.Time
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+ var value string
+
+ if err := json.Unmarshal(bytes, &value); err != nil {
+ return err
+ }
+
+ parse, err := time.Parse(time.RFC3339, value)
+ if err != nil {
+ return fmt.Errorf("expected RFC3339 timestamp: %w", err)
+ }
+
+ *r = rfc3339(parse)
+
+ return nil
+}
+
+type token struct {
+ AccessToken string `json:"accessToken"`
+ ExpiresAt rfc3339 `json:"expiresAt"`
+ Region string `json:"region,omitempty"`
+ StartURL string `json:"startUrl,omitempty"`
+}
+
+func (t token) Expired() bool {
+ return sdk.NowTime().Round(0).After(time.Time(t.ExpiresAt))
+}
+
+// InvalidTokenError is the error type that is returned if loaded token has expired or is otherwise invalid.
+// To refresh the SSO session run aws sso login with the corresponding profile.
+type InvalidTokenError struct {
+ Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+ return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+ const msg = "the SSO session has expired or is invalid"
+ if i.Err == nil {
+ return msg
+ }
+ return msg + ": " + i.Err.Error()
+}
+
+func loadTokenFile(startURL string) (t token, err error) {
+ key, err := getCacheFileName(startURL)
+ if err != nil {
+ return token{}, &InvalidTokenError{Err: err}
+ }
+
+ fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+ if err != nil {
+ return token{}, &InvalidTokenError{Err: err}
+ }
+
+ if err := json.Unmarshal(fileBytes, &t); err != nil {
+ return token{}, &InvalidTokenError{Err: err}
+ }
+
+ if len(t.AccessToken) == 0 {
+ return token{}, &InvalidTokenError{}
+ }
+
+ if t.Expired() {
+ return token{}, &InvalidTokenError{Err: fmt.Errorf("access token is expired")}
+ }
+
+ return t, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
new file mode 100644
index 000000000..d525cac09
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
@@ -0,0 +1,53 @@
+package credentials
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+ // StaticCredentialsName provides a name of Static provider
+ StaticCredentialsName = "StaticCredentials"
+)
+
+// StaticCredentialsEmptyError is emitted when static credentials are empty.
+type StaticCredentialsEmptyError struct{}
+
+func (*StaticCredentialsEmptyError) Error() string {
+ return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+ Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS
+// credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+ return StaticCredentialsProvider{
+ Value: aws.Credentials{
+ AccessKeyID: key,
+ SecretAccessKey: secret,
+ SessionToken: session,
+ },
+ }
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+ v := s.Value
+ if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+ return aws.Credentials{
+ Source: StaticCredentialsName,
+ }, &StaticCredentialsEmptyError{}
+ }
+
+ if len(v.Source) == 0 {
+ v.Source = StaticCredentialsName
+ }
+
+ return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..1ea13c777
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,285 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that per instance of credentials.Credentials all requests
+// to refresh the credentials will be synchronized. But, the SDK is unable to
+// ensure synchronous usage of the AssumeRoleProvider if the value is shared
+// between multiple Credentials or service clients.
+//
+// Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDKs's stscreds package.
+//
+// // Initial credentials loaded from SDK's default credential chain. Such as
+// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+// // Role. These credentials will be used to make the STS Assume Role API.
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN.
+// stsSvc := sts.NewFromConfig(cfg)
+// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+// cfg.Credentials = &aws.CredentialsCache{Provider: creds}
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// Assume Role with static MFA Token
+//
+// To assume an IAM role with a MFA token you can either specify a MFA token code
+// directly or provide a function to prompt the user each time the credentials
+// need to refresh the role's credentials. Specifying the TokenCode should be used
+// for short lived operations that will not need to be refreshed, and when you do
+// not want to have direct control over the user provides their MFA token.
+//
+// With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+// credentials.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenCode = aws.String("00000000")
+// })
+//
+// cfg.Credentials = &aws.CredentialsCache{Provider: creds}
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenProvider = stscreds.StdinTokenProvider
+// })
+//
+// cfg.Credentials = &aws.CredentialsCache{Provider: creds}
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface {
+ AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDKs default credential change
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ options AssumeRoleOptions
+}
+
+// AssumeRoleOptions is the configurable options for AssumeRoleProvider
+type AssumeRoleOptions struct {
+ // Client implementation of the AssumeRole operation. Required
+ Client AssumeRoleAPIClient
+
+ // IAM Role ARN to be assumed. Required
+ RoleARN string
+
+ // Session name, if you wish to uniquely identify this session.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The ARNs of IAM managed policies you want to use as managed session policies.
+ // The policies must exist in the same account as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyARNs []types.PolicyDescriptorType
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called when ever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider is set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming a IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+ o := AssumeRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &AssumeRoleProvider{
+ options: o,
+ }
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ // Apply defaults where parameters are not set.
+ if len(p.options.RoleSessionName) == 0 {
+ // Try to work out a role name that will hopefully end up unique.
+ p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano())
+ }
+ if p.options.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.options.Duration = DefaultDuration
+ }
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)),
+ PolicyArns: p.options.PolicyARNs,
+ RoleArn: aws.String(p.options.RoleARN),
+ RoleSessionName: aws.String(p.options.RoleSessionName),
+ ExternalId: p.options.ExternalID,
+ }
+ if p.options.Policy != nil {
+ input.Policy = p.options.Policy
+ }
+ if p.options.SerialNumber != nil {
+ if p.options.TokenProvider != nil {
+ input.SerialNumber = p.options.SerialNumber
+ code, err := p.options.TokenProvider()
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but neither TokenCode nor TokenProvider are set")
+ }
+ }
+
+ resp, err := p.options.Client.AssumeRole(ctx, input)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ return aws.Credentials{
+ AccessKeyID: *resp.Credentials.AccessKeyId,
+ SecretAccessKey: *resp.Credentials.SecretAccessKey,
+ SessionToken: *resp.Credentials.SessionToken,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: *resp.Credentials.Expiration,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..7854a3228
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,127 @@
+package stscreds
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode()
+
+const (
+ // WebIdentityProviderName is the web identity provider name
+ WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation.
+type AssumeRoleWithWebIdentityAPIClient interface {
+ AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error)
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+ options WebIdentityRoleOptions
+}
+
+// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider
+type WebIdentityRoleOptions struct {
+ // Client implementation of the AssumeRoleWithWebIdentity operation. Required
+ Client AssumeRoleWithWebIdentityAPIClient
+
+ // JWT Token Provider. Required
+ TokenRetriever IdentityTokenRetriever
+
+ // IAM Role ARN to assume. Required
+ RoleARN string
+
+ // Session name, if you wish to uniquely identify this session.
+ RoleSessionName string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you
+ // want to use as managed session policies. The policies must exist in the
+ // same account as the role.
+ PolicyARNs []types.PolicyDescriptorType
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT
+type IdentityTokenRetriever interface {
+ GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+ b, err := ioutil.ReadFile(string(j))
+ if err != nil {
+ return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+ }
+
+ return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.ClientAPI
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+ o := WebIdentityRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ TokenRetriever: tokenRetriever,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role from a token which is located at
+// 'WebIdentityTokenFilePath' specified destination and if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ b, err := p.options.TokenRetriever.GetIdentityToken()
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err)
+ }
+
+ sessionName := p.options.RoleSessionName
+ if len(sessionName) == 0 {
+ // session name is used to uniquely identify a session. This simply
+ // uses unix time in nanoseconds to uniquely identify sessions.
+ sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+ }
+ resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
+ PolicyArns: p.options.PolicyARNs,
+ RoleArn: &p.options.RoleARN,
+ RoleSessionName: &sessionName,
+ WebIdentityToken: aws.String(string(b)),
+ }, func(options *sts.Options) {
+ options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+ })
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+ }
+
+ // InvalidIdentityToken error is a temporary error that can occur
+ // when assuming an Role with a JWT web identity token.
+
+ value := aws.Credentials{
+ AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId),
+ SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey),
+ SessionToken: aws.ToString(resp.Credentials.SessionToken),
+ Source: WebIdentityProviderName,
+ CanExpire: true,
+ Expires: *resp.Credentials.Expiration,
+ }
+ return value, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
new file mode 100644
index 000000000..998668d29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
@@ -0,0 +1,257 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID provides the unique name of this API client
+const ServiceID = "ec2imds"
+
+// Client provides the API client for interacting with the Amazon EC2 Instance
+// Metadata Service API.
+type Client struct {
+ options Options
+}
+
+// ClientEnableState provides an enumeration if the client is enabled,
+// disabled, or default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+ ClientDefaultEnableState ClientEnableState = iota // default behavior
+ ClientDisabled // client disabled
+ ClientEnabled // client enabled
+)
+
+const (
+ disableClientEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+ // Client endpoint options
+ endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+ defaultEndpoint = "http://169.254.169.254"
+)
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ options.HTTPClient = resolveHTTPClient(options.HTTPClient)
+
+ if options.Retryer == nil {
+ options.Retryer = retry.NewStandard()
+ }
+ options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second)
+
+ if options.ClientEnableState == ClientDefaultEnableState {
+ if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") {
+ options.ClientEnableState = ClientDisabled
+ }
+ }
+
+ if len(options.Endpoint) == 0 {
+ if v := os.Getenv(endpointEnvVar); len(v) != 0 {
+ options.Endpoint = v
+ } else {
+ options.Endpoint = defaultEndpoint
+ }
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ if client.options.tokenProvider == nil && !client.options.disableAPIToken {
+ client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL)
+ }
+
+ return client
+}
+
+// NewFromConfig returns an initialized Client based the AWS SDK config, and
+// functional options. Provide additional functional options to further
+// configure the behavior of the client, such as changing the client's endpoint
+// or adding custom middleware behavior.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+ HTTPClient: cfg.HTTPClient,
+ }
+
+ if cfg.Retryer != nil {
+ opts.Retryer = cfg.Retryer()
+ }
+
+ return New(opts, optFns...)
+}
+
+// Options provides the fields for configuring the API client's behavior.
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation
+ // call to modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The endpoint the client will use to retrieve EC2 instance metadata.
+
+ //
+ // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT
+ // has a value the client will use the value of the environment variable as
+ // the endpoint for operation calls.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+ Endpoint string
+
+ // The HTTP client to invoke API calls with. Defaults to client's default
+ // HTTP implementation if nil.
+ HTTPClient HTTPClient
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // Changes if the EC2 Instance Metadata client is enabled or not. Client
+ // will default to enabled if not set to ClientDisabled. When the client is
+ // disabled it will return an error for all operation calls.
+ //
+ // If ClientEnableState value is ClientDefaultEnableState (default value),
+ // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+ // "true", the client will be disabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ ClientEnableState ClientEnableState
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // provides the caching of API tokens used for operation calls. If unset,
+ // the API token will not be retrieved for the operation.
+ tokenProvider *tokenProvider
+
+ // option to disable the API token provider for testing.
+ disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+ return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options),
+ stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.ClientEnableState == ClientDisabled {
+ return nil, metadata, &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: fmt.Errorf(
+ "access disabled to EC2 IMDS via client option, or %q environment variable",
+ disableClientEnvVar),
+ }
+ }
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, metadata, err = handler.Handle(ctx, params)
+ if err != nil {
+ return nil, metadata, &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ return result, metadata, err
+}
+
+const (
+ // HTTP client constants
+ defaultDialerTimeout = 250 * time.Millisecond
+ defaultResponseHeaderTimeout = 500 * time.Millisecond
+)
+
+func resolveHTTPClient(client HTTPClient) HTTPClient {
+ if client == nil {
+ client = awshttp.NewBuildableClient()
+ }
+
+ if c, ok := client.(*awshttp.BuildableClient); ok {
+ client = c.
+ WithDialerOptions(func(d *net.Dialer) {
+ // Use a custom Dial timeout for the EC2 Metadata service to account
+ // for the possibility the application might not be running in an
+ // environment with the service present. The client should fail fast in
+ // this case.
+ d.Timeout = defaultDialerTimeout
+ }).
+ WithTransportOptions(func(tr *http.Transport) {
+ // Use a custom Transport timeout for the EC2 Metadata service to
+ // account for the possibility that the application might be running in
+ // a container, and EC2Metadata service drops the connection after a
+ // single IP Hop. The client should fail fast in this case.
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout
+ })
+ }
+
+ return client
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
new file mode 100644
index 000000000..9e3bdb0e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -0,0 +1,76 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getDynamicDataPath = "/latest/dynamic"
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) {
+ if params == nil {
+ params = &GetDynamicDataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns,
+ addGetDynamicDataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetDynamicDataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetDynamicDataInput provides the input parameters for the GetDynamicData
+// operation.
+type GetDynamicDataInput struct {
+ // The relative dynamic data path to retrieve. Can be empty string to
+ // retrieve a response containing a new line separated list of dynamic data
+ // resources available.
+ //
+ // Must not include the dynamic data base path.
+ //
+ // May include leading slash. If Path includes trailing slash the trailing
+ // slash will be included in the request for the resource.
+ Path string
+}
+
+// GetDynamicDataOutput provides the output parameters for the GetDynamicData
+// operation.
+type GetDynamicDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetDynamicDataPath,
+ buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetDynamicDataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetDynamicDataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 000000000..62a466e9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,95 @@
+package imds
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves the IAM information (instance profile) associated
+// with the instance. Error is returned if the request fails or is unable
+// to parse the response.
+func (c *Client) GetIAMInfo(
+ ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+ *GetIAMInfoOutput, error,
+) {
+ if params == nil {
+ params = &GetIAMInfoInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+ addGetIAMInfoMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetIAMInfoOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+ IAMInfo
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetIAMInfoPath,
+ buildGetIAMInfoOutput,
+ )
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+ return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (interface{}, error) {
+ defer resp.Body.Close()
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(resp.Body, ringBuffer)
+
+ imdsResult := &GetIAMInfoOutput{}
+ if err := json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+ return nil, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode instance identity document, %w", err),
+ Snapshot: ringBuffer.Bytes(),
+ }
+ }
+	// Any Code value other than "success" is an error
+ if !strings.EqualFold(imdsResult.Code, "success") {
+ return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+ imdsResult.Code)
+ }
+
+ return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling an IAM info from the metadata
+// API.
+type IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
new file mode 100644
index 000000000..9a8dc8360
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -0,0 +1,102 @@
+package imds
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document"
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *Client) GetInstanceIdentityDocument(
+ ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options),
+) (
+ *GetInstanceIdentityDocumentOutput, error,
+) {
+ if params == nil {
+ params = &GetInstanceIdentityDocumentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns,
+ addGetInstanceIdentityDocumentMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetInstanceIdentityDocumentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetInstanceIdentityDocumentInput provides the input parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentInput struct{}
+
+// GetInstanceIdentityDocumentOutput provides the output parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentOutput struct {
+ InstanceIdentityDocument
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetInstanceIdentityDocumentPath,
+ buildGetInstanceIdentityDocumentOutput,
+ )
+}
+
+func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) {
+ return getInstanceIdentityDocumentPath, nil
+}
+
+func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (interface{}, error) {
+ defer resp.Body.Close()
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(resp.Body, ringBuffer)
+
+ output := &GetInstanceIdentityDocumentOutput{}
+ if err := json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil {
+ return nil, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode instance identity document, %w", err),
+ Snapshot: ringBuffer.Bytes(),
+ }
+ }
+
+ return output, nil
+}
+
+// InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
new file mode 100644
index 000000000..cb0ce4c00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -0,0 +1,76 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getMetadataPath = "/latest/meta-data"
+
+// GetMetadata uses the path provided to request information from the Amazon
+// EC2 Instance Metadata Service. The content will be returned as a string, or
+// error if the request failed.
+func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) {
+ if params == nil {
+ params = &GetMetadataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns,
+ addGetMetadataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetMetadataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetMetadataInput provides the input parameters for the GetMetadata
+// operation.
+type GetMetadataInput struct {
+ // The relative metadata path to retrieve. Can be empty string to retrieve
+ // a response containing a new line separated list of metadata resources
+ // available.
+ //
+ // Must not include the metadata base path.
+ //
+ // May include leading slash. If Path includes trailing slash the trailing slash
+ // will be included in the request for the resource.
+ Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetMetadataPath,
+ buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetMetadataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetMetadataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 000000000..7b9b48912
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,72 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the AWS Region the instance is running in, derived
+// from the instance identity document. Error is returned if the request
+// fails or is unable to parse the response.
+func (c *Client) GetRegion(
+ ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+ *GetRegionOutput, error,
+) {
+ if params == nil {
+ params = &GetRegionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+ addGetRegionMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetRegionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
+type GetRegionOutput struct {
+ Region string
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetInstanceIdentityDocumentPath,
+ buildGetRegionOutput,
+ )
+}
+
+func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) {
+ out, err := buildGetInstanceIdentityDocumentOutput(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ result, ok := out.(*GetInstanceIdentityDocumentOutput)
+ if !ok {
+ return nil, fmt.Errorf("unexpected instance identity document type, %T", out)
+ }
+
+ region := result.Region
+ if len(region) == 0 {
+ return "", fmt.Errorf("instance metadata did not return a region value")
+ }
+
+ return &GetRegionOutput{
+ Region: region,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
new file mode 100644
index 000000000..2f58b9cfa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
@@ -0,0 +1,111 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getTokenPath = "/latest/api/token"
+const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds"
+
+// getToken uses the duration to return a token for EC2 IMDS, or an error if
+// the request failed.
+func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) {
+ if params == nil {
+ params = &getTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns,
+ addGetTokenMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*getTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type getTokenInput struct {
+ TokenTTL time.Duration
+}
+
+type getTokenOutput struct {
+ Token string
+ TokenTTL time.Duration
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
+ err := addRequestMiddleware(stack,
+ options,
+ "PUT",
+ buildGetTokenPath,
+ buildGetTokenOutput)
+ if err != nil {
+ return err
+ }
+
+ err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func buildGetTokenPath(interface{}) (string, error) {
+ return getTokenPath, nil
+}
+
+func buildGetTokenOutput(resp *smithyhttp.Response) (interface{}, error) {
+ defer resp.Body.Close()
+
+ ttlHeader := resp.Header.Get(tokenTTLHeader)
+ tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse API token, %w", err)
+ }
+
+ var token strings.Builder
+ if _, err := io.Copy(&token, resp.Body); err != nil {
+ return nil, fmt.Errorf("unable to read API token, %w", err)
+ }
+
+ return &getTokenOutput{
+ Token: token.String(),
+ TokenTTL: time.Duration(tokenTTL) * time.Second,
+ }, nil
+}
+
+type tokenTTLRequestHeader struct{}
+
+func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" }
+func (*tokenTTLRequestHeader) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+ }
+
+ input, ok := in.Parameters.(*getTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+ }
+
+ req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 000000000..88aa61e9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,60 @@
+package imds
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData requests the instance's user data from the EC2 instance
+// metadata service. The content will be returned as an io.ReadCloser, or
+// error if the request failed.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+ if params == nil {
+ params = &GetUserDataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+ addGetUserDataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetUserDataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetUserDataPath,
+ buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+ return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetUserDataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 000000000..9ae608291
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,6 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// See the EC2 IMDS user guide for more information on using the API.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.mod
new file mode 100644
index 000000000..8ebb44e3f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.mod
@@ -0,0 +1,11 @@
+module github.com/aws/aws-sdk-go-v2/feature/ec2/imds
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../../
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
new file mode 100644
index 000000000..48a6bdb5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -0,0 +1,226 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "path"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func addAPIRequestMiddleware(stack *middleware.Stack,
+ options Options,
+ getPath func(interface{}) (string, error),
+ getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+ err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+ if err != nil {
+ return err
+ }
+
+ // Token Serializer build and state management.
+ if !options.disableAPIToken {
+ err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After)
+ if err != nil {
+ return err
+ }
+
+ err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func addRequestMiddleware(stack *middleware.Stack,
+ options Options,
+ method string,
+ getPath func(interface{}) (string, error),
+ getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+ err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack)
+ if err != nil {
+ return err
+ }
+
+ // Operation timeout
+ err = stack.Initialize.Add(&operationTimeout{
+ Timeout: defaultOperationTimeout,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // Operation Serializer
+ err = stack.Serialize.Add(&serializeRequest{
+ GetPath: getPath,
+ Method: method,
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // Operation endpoint resolver
+ err = stack.Serialize.Insert(&resolveEndpoint{
+ Endpoint: options.Endpoint,
+ }, "OperationSerializer", middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // Operation Deserializer
+ err = stack.Deserialize.Add(&deserializeResponse{
+ GetOutput: getOutput,
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // Retry support
+ return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
+ Retryer: options.Retryer,
+ LogRetryAttempts: options.ClientLogMode.IsRetries(),
+ })
+}
+
+type serializeRequest struct {
+ GetPath func(interface{}) (string, error)
+ Method string
+}
+
+func (*serializeRequest) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *serializeRequest) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ reqPath, err := m.GetPath(in.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to get request URL path, %w", err)
+ }
+
+ request.Request.URL.Path = reqPath
+ request.Request.Method = m.Method
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type deserializeResponse struct {
+ GetOutput func(*smithyhttp.Response) (interface{}, error)
+}
+
+func (*deserializeResponse) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *deserializeResponse) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf(
+ "unexpected transport response type, %T", out.RawResponse)
+ }
+
+	// Any status code outside the 2xx range (200 <= code < 300) is an error
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ resp.Body.Close()
+ return out, metadata, &smithyhttp.ResponseError{
+ Response: resp,
+ Err: fmt.Errorf("request to EC2 IMDS failed"),
+ }
+ }
+
+ result, err := m.GetOutput(resp)
+ if err != nil {
+ return out, metadata, fmt.Errorf(
+ "unable to get deserialized result for response, %w", err,
+ )
+ }
+ out.Result = result
+
+ return out, metadata, err
+}
+
+type resolveEndpoint struct {
+ Endpoint string
+}
+
+func (*resolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ req.URL, err = url.Parse(m.Endpoint)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+const (
+ defaultOperationTimeout = 5 * time.Second
+)
+
+type operationTimeout struct {
+ Timeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+ ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ var cancelFn func()
+
+ ctx, cancelFn = context.WithTimeout(ctx, m.Timeout)
+ defer cancelFn()
+
+ return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.
+func appendURIPath(base, add string) string {
+ reqPath := path.Join(base, add)
+ if len(add) != 0 && add[len(add)-1] == '/' {
+ reqPath += "/"
+ }
+ return reqPath
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 000000000..275fade48
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,237 @@
+package imds
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+ // Headers for Token and TTL
+ tokenHeader = "x-aws-ec2-metadata-token"
+ defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+ client *Client
+ tokenTTL time.Duration
+
+ token *apiToken
+ tokenMux sync.RWMutex
+
+ disabled uint32 // Atomic updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+ return &tokenProvider{
+ client: client,
+ tokenTTL: ttl,
+ }
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance metadata service.
+type apiToken struct {
+ token string
+ expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+ // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+ // time is always based on reported wall-clock time.
+ return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware, that if the token provider is
+// enabled, will attempt to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If request for getToken fails the token provider may be disabled from future
+// requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if !t.enabled() {
+ // short-circuits to insecure data flow if token provider is disabled.
+ return next.HandleFinalize(ctx, input)
+ }
+
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+ }
+
+ tok, err := t.getToken(ctx)
+ if err != nil {
+ // If the error allows the token to downgrade to insecure flow allow that.
+ var bypassErr *bypassTokenRetrievalError
+ if errors.As(err, &bypassErr) {
+ return next.HandleFinalize(ctx, input)
+ }
+
+ return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+ }
+
+ req.Header.Set(tokenHeader, tok.token)
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, input)
+ if err == nil {
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+ }
+
+ if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+ err = &retryableError{Err: err}
+ t.enable()
+ }
+
+ return out, metadata, err
+}
+
+type retryableError struct {
+ Err error
+}
+
+func (*retryableError) RetryableError() bool { return true }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+ if !t.enabled() {
+ return nil, &bypassTokenRetrievalError{
+ Err: fmt.Errorf("cannot get API token, provider disabled"),
+ }
+ }
+
+ t.tokenMux.RLock()
+ tok = t.token
+ t.tokenMux.RUnlock()
+
+ if tok != nil && !tok.Expired() {
+ return tok, nil
+ }
+
+ tok, err = t.updateToken(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get API token, %w", err)
+ }
+
+ return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+ t.tokenMux.Lock()
+ defer t.tokenMux.Unlock()
+
+ // Prevent multiple requests to update retrieving the token.
+ if t.token != nil && !t.token.Expired() {
+ tok := t.token
+ return tok, nil
+ }
+
+ result, err := t.client.getToken(ctx, &getTokenInput{
+ TokenTTL: t.tokenTTL,
+ })
+ if err != nil {
+		// Inspect the HTTP status code of the failure to decide whether to disable the provider or surface the error.
+ var statusErr interface{ HTTPStatusCode() int }
+ if errors.As(err, &statusErr) {
+ switch statusErr.HTTPStatusCode() {
+
+ // Disable get token if failed because of 403, 404, or 405
+ case http.StatusForbidden,
+ http.StatusNotFound,
+ http.StatusMethodNotAllowed:
+
+ t.disable()
+
+ // 400 errors are terminal, and need to be upstreamed
+ case http.StatusBadRequest:
+ return nil, err
+ }
+ }
+
+ // Disable if request send failed or timed out getting response
+ var re *smithyhttp.RequestSendError
+ var ce *smithy.CanceledError
+ if errors.As(err, &re) || errors.As(err, &ce) {
+ atomic.StoreUint32(&t.disabled, 1)
+ }
+
+ // Token couldn't be retrieved, but bypass this, and allow the
+ // request to continue.
+ return nil, &bypassTokenRetrievalError{Err: err}
+ }
+
+ tok := &apiToken{
+ token: result.Token,
+ expires: timeNow().Add(result.TokenTTL),
+ }
+ t.token = tok
+
+ return tok, nil
+}
+
+type bypassTokenRetrievalError struct {
+ Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+ return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+// enabled returns if the token provider is current enabled or not.
+func (t *tokenProvider) enabled() bool {
+ return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// disable disables the token provider and it will no longer attempt to inject
+// the token, nor request updates.
+func (t *tokenProvider) disable() {
+ atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provide to start refreshing tokens, and adding them
+// to the pending request.
+func (t *tokenProvider) enable() {
+ t.tokenMux.Lock()
+ t.token = nil
+ t.tokenMux.Unlock()
+ atomic.StoreUint32(&t.disabled, 0)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
new file mode 100644
index 000000000..4059f9851
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
@@ -0,0 +1,37 @@
+package manager
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation.
+type DeleteObjectsAPIClient interface {
+	DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
+}
+
+// DownloadAPIClient is an S3 API client that can invoke the GetObject operation.
+type DownloadAPIClient interface {
+	GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error)
+}
+
+// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation.
+type HeadBucketAPIClient interface {
+	HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error)
+}
+
+// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+	ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
+}
+
+// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload,
+// CompleteMultipartUpload, and AbortMultipartUpload operations.
+type UploadAPIClient interface {
+	PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error)
+	UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
+	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
+	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
+	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
new file mode 100644
index 000000000..2d8bd7e00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
@@ -0,0 +1,139 @@
+package manager
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// bucketRegionHeader is the response header in which S3 reports the bucket's
+// actual region on a HeadBucket request.
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// client's configured region to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A BucketNotFound error will be returned if the bucket does not exist in the
+// AWS partition the client region belongs to.
+//
+// For example to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Println("error:", err)
+//		return
+//	}
+//
+//	bucket := "my-bucket"
+//	region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket)
+//	if err != nil {
+//		var bnf manager.BucketNotFound
+//		if errors.As(err, &bnf) {
+//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//		}
+//		return
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// By default the request will be made to the Amazon S3 endpoint using the virtual-hosted-style addressing.
+//
+//	bucketname.s3.us-west-2.amazonaws.com/
+//
+// To configure the GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g.
+// fips-us-gov-west-1) set the EndpointResolver on the config or client the
+// utility is called with.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEndpointResolver(
+//			aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+//				return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil
+//			}),
+//	)
+//	if err != nil {
+//		panic(err)
+//	}
+func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) {
+	var captureBucketRegion deserializeBucketRegion
+
+	// Base options go first so caller-supplied optFns run after them: the
+	// request is made anonymously (unsigned), with a middleware registered to
+	// capture the bucket-region response header.
+	clientOptionFns := make([]func(*s3.Options), len(optFns)+1)
+	clientOptionFns[0] = func(options *s3.Options) {
+		options.Credentials = aws.AnonymousCredentials{}
+		options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware)
+	}
+	copy(clientOptionFns[1:], optFns)
+
+	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
+		Bucket: aws.String(bucket),
+	}, clientOptionFns...)
+	// The region header may be present even when HeadBucket returns an error,
+	// so only treat the call as failed if the header was not captured.
+	if len(captureBucketRegion.BucketRegion) == 0 && err != nil {
+		var httpStatusErr interface {
+			HTTPStatusCode() int
+		}
+		if !errors.As(err, &httpStatusErr) {
+			return "", err
+		}
+
+		// 404 means the bucket does not exist in this partition.
+		if httpStatusErr.HTTPStatusCode() == http.StatusNotFound {
+			return "", &bucketNotFound{}
+		}
+
+		return "", err
+	}
+
+	return captureBucketRegion.BucketRegion, nil
+}
+
+// deserializeBucketRegion is a deserialize-step middleware that captures the
+// X-Amz-Bucket-Region header from the raw HTTP response.
+type deserializeBucketRegion struct {
+	BucketRegion string
+}
+
+// RegisterMiddleware adds this middleware to the end of the stack's
+// Deserialize step.
+func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(d, middleware.After)
+}
+
+// ID identifies this middleware within a middleware stack.
+func (d *deserializeBucketRegion) ID() string {
+	return "DeserializeBucketRegion"
+}
+
+// HandleDeserialize records the bucket-region response header once the
+// request has completed and the response has been deserialized.
+func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	// The raw response is only inspectable for HTTP transports.
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+	}
+
+	d.BucketRegion = resp.Header.Get(bucketRegionHeader)
+
+	return out, metadata, err
+}
+
+// BucketNotFound indicates the bucket was not found in the partition when
+// calling GetBucketRegion. Test for it with errors.As.
+type BucketNotFound interface {
+	error
+
+	// isBucketNotFound is unexported so only this package can satisfy the
+	// interface, keeping it a sealed sentinel type.
+	isBucketNotFound()
+}
+
+// bucketNotFound is the concrete BucketNotFound implementation returned by
+// GetBucketRegion.
+type bucketNotFound struct{}
+
+func (b *bucketNotFound) Error() string {
+	return "bucket not found"
+}
+
+func (b *bucketNotFound) isBucketNotFound() {}
+
+// Compile-time check that bucketNotFound satisfies BucketNotFound.
+var _ BucketNotFound = (*bucketNotFound)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
new file mode 100644
index 000000000..e781aef61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
@@ -0,0 +1,79 @@
+package manager
+
+import (
+ "io"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker.
+type BufferedReadSeeker struct {
+	r      io.ReadSeeker
+	buffer []byte
+	// readIdx/writeIdx delimit the valid, unread region of buffer.
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker
+// if len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+// reset points the reader at r and discards any buffered data.
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If the len(p) > the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	// Buffer exhausted: refill it, or bypass it entirely for large reads.
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	// Serve the request from the buffered region.
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	n, err := b.r.Seek(offset, whence)
+
+	b.reset(b.r)
+
+	return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
+	_, err := b.Seek(off, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go
new file mode 100644
index 000000000..6d1dc6d2c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package manager
+
+// defaultUploadBufferProvider returns the default upload buffer strategy.
+// On non-Windows platforms no extra buffering is used, so nil is returned.
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go
new file mode 100644
index 000000000..1ae881c10
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go
@@ -0,0 +1,5 @@
+package manager
+
+// defaultUploadBufferProvider returns the default upload buffer strategy on
+// Windows: a pooled, 1 MiB buffered ReadSeekerWriteTo provider.
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return NewBufferedReadSeekerWriteToPool(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go
new file mode 100644
index 000000000..d55181452
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package manager
+
+// defaultDownloadBufferProvider returns the default download buffer strategy.
+// On non-Windows platforms no extra buffering is used, so nil is returned.
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go
new file mode 100644
index 000000000..88887ff58
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go
@@ -0,0 +1,5 @@
+package manager
+
+// defaultDownloadBufferProvider returns the default download buffer strategy
+// on Windows: a pooled, 1 MiB buffered WriterReadFrom provider.
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go
new file mode 100644
index 000000000..31171a698
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go
@@ -0,0 +1,3 @@
+// Package manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful for when working with large objects.
+package manager
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
new file mode 100644
index 000000000..5c8053fce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
@@ -0,0 +1,501 @@
+package manager
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/smithy-go/logging"
+)
+
+// userAgentKey is the feature metadata key added to the User-Agent of
+// requests made by the transfer manager.
+const userAgentKey = "s3-transfer"
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// DefaultPartBodyMaxRetries is the default number of retries to make when
+// reading a part's body fails.
+const DefaultPartBodyMaxRetries = 3
+
+// errReadingBody wraps an error that occurred while copying a part's HTTP
+// response body, so it can be distinguished from API errors and retried.
+type errReadingBody struct {
+	err error
+}
+
+// Error implements the error interface.
+func (e *errReadingBody) Error() string {
+	return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+// Unwrap returns the underlying body-read error.
+func (e *errReadingBody) Unwrap() error {
+	return e.err
+}
+
+// The Downloader structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Downloader's properties is not safe to be done concurrently.
+type Downloader struct {
+	// The size (in bytes) to request from S3 for each part.
+	// The minimum allowed part size is 5MB, and if this value is set to zero,
+	// the DefaultDownloadPartSize value will be used.
+	//
+	// PartSize is ignored if the Range input parameter is provided.
+	PartSize int64
+
+	// PartBodyMaxRetries is the number of retry attempts to make for failed
+	// part downloads.
+	PartBodyMaxRetries int
+
+	// Logger to send logging messages to
+	Logger logging.Logger
+
+	// Enable Logging of part download retry attempts
+	LogInterruptedDownloads bool
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultDownloadConcurrency value will be used.
+	//
+	// Concurrency of 1 will download the parts sequentially.
+	//
+	// Concurrency is ignored if the Range input parameter is provided.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 DownloadAPIClient
+
+	// List of client options that will be passed down to individual API
+	// operation requests made by the downloader.
+	ClientOptions []func(*s3.Options)
+
+	// Defines the buffer strategy used when downloading a part.
+	//
+	// If a WriterReadFromProvider is given the Download manager
+	// will pass the io.WriterAt of the Download request to the provider
+	// and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from http response body.
+	BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderClientOptions appends to the Downloader's API request options.
+// The returned function is intended to be passed to NewDownloader or Download.
+func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
+	return func(d *Downloader) {
+		d.ClientOptions = append(d.ClientOptions, opts...)
+	}
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires an S3 API client capable of invoking
+// GetObject (see DownloadAPIClient).
+//
+// Example:
+//	// Load AWS Config
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create an S3 client using the loaded configuration
+//	s3.NewFromConfig(cfg)
+//
+//	// Create a downloader passing it the S3 client
+//	downloader := manager.NewDownloader(s3.NewFromConfig(cfg))
+//
+//	// Create a downloader with the client and custom downloader options
+//	downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader {
+	// Start from package defaults; caller options may override any field.
+	d := &Downloader{
+		S3:                 c,
+		PartSize:           DefaultDownloadPartSize,
+		PartBodyMaxRetries: DefaultPartBodyMaxRetries,
+		Concurrency:        DefaultDownloadConcurrency,
+		BufferProvider:     defaultDownloadBufferProvider(),
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// Download downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// The Context must not be nil. A nil Context will cause a panic. Use the
+// Context to add deadlining, timeouts, etc. Download may create sub-contexts
+// for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderClientOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or by an in-memory []byte wrapper implementing io.WriterAt.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObjectInput request for that object's range. This will
+// cause the part size, and concurrency configurations to be ignored.
+func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	// Note the value receiver: impl.cfg is a per-call copy of the Downloader,
+	// so per-call option functions cannot mutate the caller's instance.
+	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+	// Copy ClientOptions, prepending the transfer-manager User-Agent option
+	// so caller-supplied options are applied after it.
+	clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1)
+	clientOptions = append(clientOptions, func(o *s3.Options) {
+		o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
+	})
+	clientOptions = append(clientOptions, impl.cfg.ClientOptions...)
+	impl.cfg.ClientOptions = clientOptions
+
+	for _, option := range options {
+		option(&impl.cfg)
+	}
+
+	// Ensures we don't need nil checks later on
+	impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger)
+
+	impl.partBodyMaxRetries = d.PartBodyMaxRetries
+
+	// -1 marks the total object size as not yet known.
+	impl.totalBytes = -1
+	if impl.cfg.Concurrency == 0 {
+		impl.cfg.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if impl.cfg.PartSize == 0 {
+		impl.cfg.PartSize = DefaultDownloadPartSize
+	}
+
+	return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+// It carries the per-call state for a single Download invocation.
+type downloader struct {
+	ctx context.Context
+	cfg Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	// Shared progress state updated by worker goroutines; presumably guarded
+	// by m via the getErr/setErr/incrWritten accessors — confirm against the
+	// rest of the file.
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+
+	partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If range is specified fall back to single download of that range
+	// this enables the functionality of ranged gets with the downloader but
+	// at the cost of no multipart downloads.
+	if rng := aws.ToString(d.in.Range); len(rng) > 0 {
+		d.downloadRange(rng)
+		return d.written, d.err
+	}
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Object size is known: spin up workers and fan ranges out to them.
+		ch := make(chan dlchunk, d.cfg.Concurrency)
+
+		for i := 0; i < d.cfg.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queuing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+			d.pos += d.cfg.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Size unknown: read sequentially until an error stops us.
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur, which is the expected terminal condition and is
+		// cleared rather than reported.
+		var responseError interface {
+			HTTPStatusCode() int
+		}
+		if errors.As(d.err, &responseError) {
+			if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable {
+				d.err = nil
+			}
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+// It exits when ch is closed and drained.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		if d.getErr() != nil {
+			// Drain the channel if there is an error, to prevent deadlocking
+			// of download producer.
+			continue
+		}
+
+		// First failure wins; later workers see it via getErr and drain.
+		if err := d.downloadChunk(chunk); err != nil {
+			d.setErr(err)
+		}
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	if d.getErr() != nil {
+		return
+	}
+
+	// Advance the read position by one part regardless of outcome, so the
+	// next call requests the following range.
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+	d.pos += d.cfg.PartSize
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used down download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos}
+ // Ranges specified will short circuit the multipart download
+ chunk.withRange = rng
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+
+ // Update the position based on the amount of data received.
+ d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from s3, retrying up to
+// d.partBodyMaxRetries times when the failure occurred while copying the
+// response body (as opposed to the request itself failing).
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+	// Clone the caller's input so per-chunk mutation (Range) does not leak.
+	in := &s3.GetObjectInput{}
+	awsutil.Copy(in, d.in)
+
+	// Get the next byte range of data
+	in.Range = aws.String(chunk.ByteRange())
+
+	var n int64
+	var err error
+	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+		n, err = d.tryDownloadChunk(in, &chunk)
+		if err == nil {
+			break
+		}
+		// Check if the returned error is an errReadingBody.
+		// If err is errReadingBody this indicates that an error
+		// occurred while copying the http response body.
+		// If this occurs we unwrap the err to set the underlying error
+		// and attempt any remaining retries.
+		if bodyErr, ok := err.(*errReadingBody); ok {
+			err = bodyErr.Unwrap()
+		} else {
+			return err
+		}
+
+		// Reset the chunk's write cursor so the retry rewrites the part
+		// from its beginning instead of appending after partial data.
+		chunk.cur = 0
+
+		d.cfg.Logger.Logf(logging.Debug, "object part body download interrupted %s, err, %v, retrying attempt %d",
+			aws.ToString(in.Key), err, retry)
+	}
+
+	// Record bytes written even on failure; written tracks actual output.
+	d.incrWritten(n)
+
+	return err
+}
+
+// tryDownloadChunk performs a single GetObject attempt and copies the
+// response body into w. Returns the number of bytes copied; a copy failure
+// is wrapped in *errReadingBody so the caller can distinguish it from a
+// request failure and retry.
+func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
+	cleanup := func() {}
+	if d.cfg.BufferProvider != nil {
+		// Optionally wrap w with a pooled buffering strategy; cleanup
+		// returns the buffer to the provider.
+		w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
+	}
+	defer cleanup()
+
+	resp, err := d.cfg.S3.GetObject(d.ctx, in, d.cfg.ClientOptions...)
+	if err != nil {
+		return 0, err
+	}
+	d.setTotalBytes(resp) // Set total if not yet set.
+
+	n, err := io.Copy(w, resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return n, &errReadingBody{err: err}
+	}
+
+	return n, nil
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+// Returns -1 until a GetObject response has resolved the object's size.
+func (d *downloader) getTotalBytes() int64 {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or Content-Length. Content-Length is used when the response
+// does not include a Content-Range. Meaning the object was not chunked. This
+// occurs when the full file fits within the PartSize directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	// First resolution wins; later responses must not change the total.
+	if d.totalBytes >= 0 {
+		return
+	}
+
+	if resp.ContentRange == nil {
+		// ContentRange is nil when the full file contents is provided, and
+		// is not chunked. Use ContentLength instead.
+		if resp.ContentLength > 0 {
+			d.totalBytes = resp.ContentLength
+			return
+		}
+	} else {
+		// Content-Range is "bytes start-end/total"; the total is the text
+		// after the final '/'.
+		parts := strings.Split(*resp.ContentRange, "/")
+
+		total := int64(-1)
+		var err error
+		// Checking for whether or not a numbered total exists
+		// If one does not exist, we will assume the total to be -1, undefined,
+		// and sequentially download each chunk until hitting a 416 error
+		totalStr := parts[len(parts)-1]
+		if totalStr != "*" {
+			total, err = strconv.ParseInt(totalStr, 10, 64)
+			if err != nil {
+				// d.m is already held; assign d.err directly — calling
+				// setErr here would deadlock.
+				d.err = err
+				return
+			}
+		}
+
+		d.totalBytes = total
+	}
+}
+
+// incrWritten adds n to the running count of bytes written (thread-safe).
+func (d *downloader) incrWritten(n int64) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+	// w is the destination; writes land at offset start+cur.
+	w     io.WriterAt
+	// start is the chunk's absolute byte offset in the object.
+	start int64
+	// size caps how many bytes this chunk may write (ignored with withRange).
+	size  int64
+	// cur is the write cursor relative to start; reset to 0 on retry.
+	cur   int64
+
+	// specifies the byte range the chunk should be downloaded with.
+	withRange string
+}
+
+// Write implements io.Writer for dlchunk, forwarding p to the wrapped
+// io.WriterAt at the chunk's absolute offset (start + cur) and advancing
+// the cursor by the number of bytes written.
+//
+// When the chunk carries an explicit byte range, the size limit is not
+// enforced, since the total length may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+	if len(c.withRange) == 0 && c.cur >= c.size {
+		return 0, io.EOF
+	}
+
+	written, werr := c.w.WriteAt(p, c.start+c.cur)
+	c.cur += int64(written)
+	return written, werr
+}
+
+// ByteRange returns the HTTP Range header value the client should use to
+// request this chunk. An explicitly configured range takes precedence over
+// the one computed from start and size.
+func (c *dlchunk) ByteRange() string {
+	if c.withRange != "" {
+		return c.withRange
+	}
+
+	end := c.start + c.size - 1
+	return fmt.Sprintf("bytes=%d-%d", c.start, end)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.mod
new file mode 100644
index 000000000..8f3b4eb14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.mod
@@ -0,0 +1,38 @@
+module github.com/aws/aws-sdk-go-v2/feature/s3/manager
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/aws-sdk-go-v2/config v1.1.2
+ github.com/aws/aws-sdk-go-v2/credentials v1.1.2
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.2.1
+ github.com/aws/aws-sdk-go-v2/service/sso v1.1.2
+ github.com/aws/aws-sdk-go-v2/service/sts v1.1.2
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../../
+
+replace github.com/aws/aws-sdk-go-v2/config => ../../../config/
+
+replace github.com/aws/aws-sdk-go-v2/service/s3 => ../../../service/s3/
+
+replace github.com/aws/aws-sdk-go-v2/credentials => ../../../credentials/
+
+replace github.com/aws/aws-sdk-go-v2/feature/ec2/imds => ../../../feature/ec2/imds/
+
+replace github.com/aws/aws-sdk-go-v2/service/sts => ../../../service/sts/
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding => ../../../service/internal/accept-encoding/
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/s3shared => ../../../service/internal/s3shared/
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => ../../../service/internal/presigned-url/
+
+replace github.com/aws/aws-sdk-go-v2/service/sso => ../../../service/sso/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.sum
new file mode 100644
index 000000000..d937214c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go.sum
@@ -0,0 +1,19 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
new file mode 100644
index 000000000..6b93a3bc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
@@ -0,0 +1,251 @@
+package manager
+
+import (
+	"context"
+	"fmt"
+	"sync"
+)
+
+// byteSlicePool is a bounded pool of fixed-size byte slices, used to cap the
+// total buffer memory shared by concurrent upload/download workers.
+type byteSlicePool interface {
+	// Get blocks until a slice is available or ctx is canceled.
+	Get(context.Context) (*[]byte, error)
+	// Put returns a slice obtained from Get back to the pool.
+	Put(*[]byte)
+	// ModifyCapacity grows (positive delta) or shrinks (negative) the pool.
+	ModifyCapacity(int)
+	// SliceSize reports the fixed length of slices produced by this pool.
+	SliceSize() int64
+	// Close releases all pooled resources; Get fails afterwards.
+	Close()
+}
+
+// maxSlicePool enforces the capacity bound with two channels: slices holds
+// idle buffers, allocations holds permission tokens for new allocations.
+type maxSlicePool struct {
+	// allocator is defined as a function pointer to allow
+	// for test cases to instrument custom tracers when allocations
+	// occur.
+	allocator sliceAllocator
+
+	slices         chan *[]byte
+	allocations    chan struct{}
+	capacityChange chan struct{}
+
+	max       int
+	sliceSize int64
+
+	// mtx guards the channel fields themselves (they are swapped by
+	// ModifyCapacity); readers take RLock, ModifyCapacity takes Lock.
+	mtx sync.RWMutex
+}
+
+// newMaxSlicePool returns a pool of sliceSize-byte buffers with zero
+// capacity; call ModifyCapacity before Get.
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+	p := &maxSlicePool{sliceSize: sliceSize}
+	p.allocator = p.newSlice
+
+	return p
+}
+
+// errZeroCapacity is returned by Get when the pool has no capacity.
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+// Get returns a pooled byte slice, blocking until an idle slice is
+// available, a new allocation is permitted, or ctx is canceled. Returns
+// errZeroCapacity if the pool capacity is (or drops to) zero.
+func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) {
+	// check if context is canceled before attempting to get a slice
+	// this ensures priority is given to the cancel case first
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	p.mtx.RLock()
+
+	for {
+		select {
+		case bs, ok := <-p.slices:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return bs, nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// pass
+		}
+
+		select {
+		case _, ok := <-p.allocations:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return p.allocator(), nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// In the event that there are no slices or allocations available
+			// This prevents some deadlock situations that can occur around sync.RWMutex
+			// When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock.
+			// By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where
+			// Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock,
+			// and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity.
+
+			// Short-circuit if the pool capacity is zero.
+			if p.max == 0 {
+				p.mtx.RUnlock()
+				return nil, errZeroCapacity
+			}
+
+			// Since we will be releasing the read-lock we need to take the reference to the channel.
+			// Since channels are references we will still get notified if slices are added, or if
+			// the channel is closed due to a capacity modification. This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case <-c:
+				// Woken by a returned slice or a capacity change (channel
+				// close); re-acquire the read lock and retry the loop.
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+// Put returns bs to the pool and notifies any waiting Get. Slices returned
+// to a zero-capacity or full pool are simply dropped for the GC to reclaim.
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the slices channel is already full when attempting to add the
+		// slice then we drop the slice rather than block. This prevents a
+		// deadlock when the channel is at max capacity and allows us to reap
+		// returned allocations that are no longer needed.
+	}
+}
+
+// ModifyCapacity adjusts the pool's maximum size by delta. It rebuilds the
+// internal channels at the new capacity, migrating allocation tokens and
+// idle slices from the old ones; closing the old channels wakes any blocked
+// Get callers so they observe the change.
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	// Shrinking to zero tears everything down.
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	// Closing capacityChange broadcasts "capacity changed" to all waiters.
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	// Carry over unused allocation tokens plus delta new ones.
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity then the old.
+		}
+	}
+}
+
+// notifyCapacity signals one waiting Get that a slice has become available.
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+	}
+}
+
+// SliceSize reports the fixed length of slices produced by this pool.
+func (p *maxSlicePool) SliceSize() int64 {
+	return p.sliceSize
+}
+
+// Close tears down the pool; subsequent Get calls return errZeroCapacity.
+func (p *maxSlicePool) Close() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.empty()
+}
+
+// empty zeroes the capacity and closes/drains all channels.
+// Caller must hold p.mtx (write lock).
+func (p *maxSlicePool) empty() {
+	p.max = 0
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+		p.capacityChange = nil
+	}
+
+	if p.allocations != nil {
+		close(p.allocations)
+		for range p.allocations {
+			// drain channel
+		}
+		p.allocations = nil
+	}
+
+	if p.slices != nil {
+		close(p.slices)
+		for range p.slices {
+			// drain channel
+		}
+		p.slices = nil
+	}
+}
+
+// newSlice is the default allocator: a fresh sliceSize-byte buffer.
+func (p *maxSlicePool) newSlice() *[]byte {
+	bs := make([]byte, p.sliceSize)
+	return &bs
+}
+
+// returnCapacityPoolCloser wraps a shared byteSlicePool so that capacity
+// added by one consumer is handed back when that consumer calls Close,
+// without closing the underlying shared pool.
+type returnCapacityPoolCloser struct {
+	byteSlicePool
+	returnCapacity int
+}
+
+// ModifyCapacity records capacity growth so Close can return it later.
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+	if delta > 0 {
+		n.returnCapacity = -1 * delta
+	}
+	n.byteSlicePool.ModifyCapacity(delta)
+}
+
+// Close gives back any capacity this wrapper added; the wrapped pool stays open.
+func (n *returnCapacityPoolCloser) Close() {
+	if n.returnCapacity < 0 {
+		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+	}
+}
+
+// sliceAllocator produces a new pool buffer; overridable in tests.
+type sliceAllocator func() *[]byte
+
+// newByteSlicePool is a package hook for constructing the default pool
+// implementation; tests may replace it.
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+	return newMaxSlicePool(sliceSize)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
new file mode 100644
index 000000000..ce117c32a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+	io.ReadSeeker
+	io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+	*BufferedReadSeeker
+}
+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+	return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+	// GetWriteTo wraps seeker; cleanup must be called when done with r.
+	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+	pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers . If size is less then < 64 KiB then the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
+// respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+ if size < 65536 {
+ size = 65536
+ }
+
+ return &BufferedReadSeekerWriteToPool{
+ pool: sync.Pool{New: func() interface{} {
+ return make([]byte, size)
+ }},
+ }
+}
+
+// GetWriteTo wraps the provided io.ReadSeeker in a BufferedReadSeekerWriteTo
+// backed by a buffer taken from the pool. The returned cleanup func must be
+// called after all operations on r complete, returning the buffer to the
+// pool for reuse.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+	buf := p.pool.Get().([]byte)
+
+	wrapped := &BufferedReadSeekerWriteTo{
+		BufferedReadSeeker: NewBufferedReadSeeker(seeker, buf),
+	}
+
+	return wrapped, func() { p.pool.Put(buf) }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
new file mode 100644
index 000000000..968f90732
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
@@ -0,0 +1,187 @@
+package manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A readSeekCloser wrapping a nonseekable io.Reader used in an API operation's
+// input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using with S3 PutObject to stream an object upload. The SDK's S3
+// Upload Manager(s3manager.Uploader) provides support for streaming
+// with the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser {
+	return &ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	// r may be any io.Reader; Seek/Close are no-ops if r lacks them.
+	r io.Reader
+}
+
+// seekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func seekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that a io.Readers might not actually be seekable.
+ switch v := s.(type) {
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return computeSeekerLength(s)
+}
+
+// GetLen returns the number of bytes remaining in the underlying reader.
+// A Len() int method is preferred; otherwise the length is measured by
+// seeking. Returns -1 (with nil error) when it cannot be determined.
+func (r *ReaderSeekerCloser) GetLen() (int64, error) {
+	l, hasLen := r.HasLen()
+	if hasLen {
+		return int64(l), nil
+	}
+
+	seeker, isSeeker := r.r.(io.Seeker)
+	if !isSeeker {
+		return -1, nil
+	}
+
+	return computeSeekerLength(seeker)
+}
+
+// computeSeekerLength measures how many bytes remain between s's current
+// offset and the end of the stream, restoring the original offset before
+// returning.
+func computeSeekerLength(s io.Seeker) (int64, error) {
+	cur, err := s.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	end, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	if _, err = s.Seek(cur, io.SeekStart); err != nil {
+		return 0, err
+	}
+
+	return end - cur, nil
+}
+
+// HasLen reports the length of the underlying reader when it exposes a
+// Len() int method, along with whether such a method exists.
+func (r *ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	lr, ok := r.r.(lenner)
+	if !ok {
+		return 0, false
+	}
+
+	return lr.Len(), true
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader zero bytes read, and nil error will be
+// returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r *ReaderSeekerCloser) Read(p []byte) (int, error) {
+	// The type switch doubles as a nil guard: a nil underlying reader does
+	// not match the case and yields (0, nil) instead of panicking.
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	// Non-seekable (or nil) underlying readers silently report offset 0.
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r *ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r *ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	// m serializes WriteAt/Bytes for concurrent use.
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided
+// The number of bytes written will be returned, or error. Can overwrite previous
+// written slices if the write ats overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		// Reallocate only when capacity is insufficient; otherwise just
+		// extend the length over the existing backing array.
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+// The returned slice aliases the internal buffer; it is only safe to use
+// once all writers have finished.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
new file mode 100644
index 000000000..b08206bd2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
@@ -0,0 +1,684 @@
+package manager
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts int32 = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into. Matches the S3 service's 5 MiB minimum.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multi part upload failed to upload all
+// chucks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+//	u := manager.NewUploader(client)
+//	output, err := u.upload(context.Background(), input)
+//	if err != nil {
+//		var multierr manager.MultiUploadFailure
+//		if errors.As(err, &multierr) {
+//			fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error())
+//		} else {
+//			fmt.Printf("upload failure, %s\n", err.Error())
+//		}
+//	}
+//
+type MultiUploadFailure interface {
+	error
+
+	// UploadID returns the upload id for the S3 multipart upload that failed.
+	UploadID() string
+}
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error
+//
+// Should be used for an error that occurred failing a S3 multipart upload,
+// and a upload ID is available. If an uploadID is not available a more
+// relevant error type should be returned instead.
+type multiUploadError struct {
+	err error
+
+	// ID for multipart upload which failed.
+	uploadID string
+}
+
+// Error returns the string representation of the error, including the upload
+// ID and the underlying cause when present.
+//
+// Satisfies the error interface.
+func (m *multiUploadError) Error() string {
+	var extra string
+	if m.err != nil {
+		extra = fmt.Sprintf(", cause: %s", m.err.Error())
+	}
+	return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra)
+}
+
+// Unwrap returns the underlying error that cause the upload failure
+func (m *multiUploadError) Unwrap() error {
+	return m.err
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m *multiUploadError) UploadID() string {
+	return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+	// The URL where the object was uploaded to.
+	Location string
+
+	// The version of the object that was uploaded. Will only be populated if
+	// the S3 Bucket is versioned. If the bucket is not versioned this field
+	// will not be set.
+	VersionID *string
+
+	// The ID for a multipart upload to S3. In the case of an error the error
+	// can be cast to the MultiUploadFailure interface to extract the upload ID.
+	UploadID string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API client options.
+// The options are applied to every S3 API call made by the uploader.
+func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) {
+	return func(u *Uploader) {
+		u.ClientOptions = append(u.ClientOptions, opts...)
+	}
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Uploader's properties is not safe to be done concurrently.
+type Uploader struct {
+	// The buffer size (in bytes) to use when buffering data into chunks and
+	// sending them as parts to S3. The minimum allowed part size is 5MB, and
+	// if this value is set to zero, the DefaultUploadPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel per call to Upload when
+	// sending parts. If this is set to zero, the DefaultUploadConcurrency value
+	// will be used.
+	//
+	// The concurrency pool is not shared between calls to Upload.
+	Concurrency int
+
+	// Setting this value to true will cause the SDK to avoid calling
+	// AbortMultipartUpload on a failure, leaving all successfully uploaded
+	// parts on S3 for manual recovery.
+	//
+	// Note that storing parts of an incomplete multipart upload counts towards
+	// space usage on S3 and will add additional costs if not cleaned up.
+	LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// Will be used to calculate the partsize of the object to be uploaded.
+	// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
+	// as 100, 50MB parts. The service enforces a hard limit of
+	// MaxUploadParts (10,000) parts.
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitReader is helpful when uploading an unbounded reader
+	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
+	// error must be used to signal end of stream.
+	//
+	// Defaults to package const's MaxUploadParts value.
+	MaxUploadParts int32
+
+	// The client to use when uploading to S3.
+	S3 UploadAPIClient
+
+	// List of request options that will be passed down to individual API
+	// operation requests made by the uploader.
+	ClientOptions []func(*s3.Options)
+
+	// Defines the buffer strategy used when uploading a part
+	BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the re-usage of streaming payload part buffers between upload calls
+	partPool byteSlicePool
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// an S3 API client (e.g. from s3.NewFromConfig) that satisfies UploadAPIClient.
+//
+// Example:
+//	// Load AWS Config
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create an S3 Client with the config
+//	client := s3.NewFromConfig(cfg)
+//
+//	// Create an uploader passing it the client
+//	uploader := manager.NewUploader(client)
+//
+//	// Create an uploader with the client and custom options
+//	uploader := manager.NewUploader(client, func(u *manager.Uploader) {
+//		u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                client,
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+		BufferProvider:    defaultUploadBufferProvider(),
+	}
+
+	// Options run before the pool is created so a custom PartSize is honored.
+	for _, option := range options {
+		option(u)
+	}
+
+	u.partPool = newByteSlicePool(u.PartSize)
+
+	return u
+}
+
+// Upload uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (*UploadOutput, error) {
+	// Value receiver: i.cfg is a per-call copy, so per-call option mutation
+	// never leaks back into the shared Uploader.
+	i := uploader{in: input, cfg: u, ctx: ctx}
+
+	// Copy ClientOptions, prepending the SDK user-agent feature key.
+	clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1)
+	clientOptions = append(clientOptions, func(o *s3.Options) {
+		o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
+	})
+	clientOptions = append(clientOptions, i.cfg.ClientOptions...)
+	i.cfg.ClientOptions = clientOptions
+
+	for _, opt := range opts {
+		opt(&i.cfg)
+	}
+
+	return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+ ctx context.Context
+ cfg Uploader
+
+ in *s3.PutObjectInput
+
+ readerPos int64 // current reader position
+ totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+ if err := u.init(); err != nil {
+ return nil, fmt.Errorf("unable to initialize upload: %w", err)
+ }
+ defer u.cfg.partPool.Close()
+
+ if u.cfg.PartSize < MinUploadPartSize {
+ return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize)
+ }
+
+ // Do one read to determine if we have more than one part
+ reader, _, cleanup, err := u.nextReader()
+ if err == io.EOF { // single part
+ return u.singlePart(reader, cleanup)
+ } else if err != nil {
+ cleanup()
+ return nil, fmt.Errorf("read upload data failed: %w", err)
+ }
+
+ mu := multiuploader{uploader: u}
+ return mu.upload(reader, cleanup)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() error {
+ if u.cfg.Concurrency == 0 {
+ u.cfg.Concurrency = DefaultUploadConcurrency
+ }
+ if u.cfg.PartSize == 0 {
+ u.cfg.PartSize = DefaultUploadPartSize
+ }
+ if u.cfg.MaxUploadParts == 0 {
+ u.cfg.MaxUploadParts = MaxUploadParts
+ }
+
+ // Try to get the total size for some optimizations
+ if err := u.initSize(); err != nil {
+ return err
+ }
+
+ // If PartSize was changed or partPool was never setup then we need to allocated a new pool
+ // so that we return []byte slices of the correct size
+ poolCap := u.cfg.Concurrency + 1
+ if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
+ u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+ u.cfg.partPool.ModifyCapacity(poolCap)
+ } else {
+ u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+ u.cfg.partPool.ModifyCapacity(poolCap)
+ }
+
+ return nil
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() error {
+ u.totalSize = -1
+
+ switch r := u.in.Body.(type) {
+ case io.Seeker:
+ n, err := seekerLen(r)
+ if err != nil {
+ return err
+ }
+ u.totalSize = n
+
+ // Try to adjust partSize if it is too small and account for
+ // integer division truncation.
+ if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+ // Add one to the part size to account for remainders
+ // during the size calculation. e.g odd number of bytes.
+ u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+ }
+ }
+
+ return nil
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+ switch r := u.in.Body.(type) {
+ case readerAtSeeker:
+ var err error
+
+ n := u.cfg.PartSize
+ if u.totalSize >= 0 {
+ bytesLeft := u.totalSize - u.readerPos
+
+ if bytesLeft <= u.cfg.PartSize {
+ err = io.EOF
+ n = bytesLeft
+ }
+ }
+
+ var (
+ reader io.ReadSeeker
+ cleanup func()
+ )
+
+ reader = io.NewSectionReader(r, u.readerPos, n)
+ if u.cfg.BufferProvider != nil {
+ reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+ } else {
+ cleanup = func() {}
+ }
+
+ u.readerPos += n
+
+ return reader, int(n), cleanup, err
+
+ default:
+ part, err := u.cfg.partPool.Get(u.ctx)
+ if err != nil {
+ return nil, 0, func() {}, err
+ }
+
+ n, err := readFillBuf(r, *part)
+ u.readerPos += int64(n)
+
+ cleanup := func() {
+ u.cfg.partPool.Put(part)
+ }
+
+ return bytes.NewReader((*part)[0:n]), n, cleanup, err
+ }
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+ for offset < len(b) && err == nil {
+ var n int
+ n, err = r.Read(b[offset:])
+ offset += n
+ }
+
+ return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ defer cleanup()
+
+ params := &s3.PutObjectInput{}
+ awsutil.Copy(params, u.in)
+ params.Body = r
+
+ // Need to use request form because URL generated in request is
+ // used in return.
+
+ var locationRecorder recordLocationClient
+ out, err := u.cfg.S3.PutObject(u.ctx, params, append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &UploadOutput{
+ Location: locationRecorder.location,
+ VersionID: out.VersionId,
+ }, nil
+}
+
+type httpClient interface {
+ Do(r *http.Request) (*http.Response, error)
+}
+
+type recordLocationClient struct {
+ httpClient
+ location string
+}
+
+func (c *recordLocationClient) WrapClient() func(o *s3.Options) {
+ return func(o *s3.Options) {
+ c.httpClient = o.HTTPClient
+ o.HTTPClient = c
+ }
+}
+
+func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) {
+ resp, err = c.httpClient.Do(r)
+ if err != nil {
+ return resp, err
+ }
+
+ if resp.Request != nil && resp.Request.URL != nil {
+ url := *resp.Request.URL
+ url.RawQuery = ""
+ c.location = url.String()
+ }
+
+ return resp, err
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+ *uploader
+ wg sync.WaitGroup
+ m sync.Mutex
+ err error
+ uploadID string
+ parts completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+ buf io.ReadSeeker
+ num int32
+ cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 required this list to be sent in sorted order.
+type completedParts []types.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ params := &s3.CreateMultipartUploadInput{}
+ awsutil.Copy(params, u.in)
+
+ // Create the multipart
+ var locationRecorder recordLocationClient
+ resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, params, append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+ if err != nil {
+ cleanup()
+ return nil, err
+ }
+ u.uploadID = *resp.UploadId
+
+ // Create the workers
+ ch := make(chan chunk, u.cfg.Concurrency)
+ for i := 0; i < u.cfg.Concurrency; i++ {
+ u.wg.Add(1)
+ go u.readChunk(ch)
+ }
+
+ // Send part 1 to the workers
+ var num int32 = 1
+ ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+ // Read and queue the rest of the parts
+ for u.geterr() == nil && err == nil {
+ var (
+ reader io.ReadSeeker
+ nextChunkLen int
+ ok bool
+ )
+
+ reader, nextChunkLen, cleanup, err = u.nextReader()
+ ok, err = u.shouldContinue(num, nextChunkLen, err)
+ if !ok {
+ cleanup()
+ if err != nil {
+ u.seterr(err)
+ }
+ break
+ }
+
+ num++
+
+ ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+ }
+
+ // Close the channel, wait for workers, and complete upload
+ close(ch)
+ u.wg.Wait()
+ complete := u.complete()
+
+ if err := u.geterr(); err != nil {
+ return nil, &multiUploadError{
+ err: err,
+ uploadID: u.uploadID,
+ }
+ }
+
+ return &UploadOutput{
+ Location: locationRecorder.location,
+ VersionID: complete.VersionId,
+ UploadID: u.uploadID,
+ }, nil
+}
+
+func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) {
+ if err != nil && err != io.EOF {
+ return false, fmt.Errorf("read multipart upload data failed, %w", err)
+ }
+
+ if nextChunkLen == 0 {
+ // No need to upload empty part, if file was empty to start
+ // with empty single part would of been created and never
+ // started multipart upload.
+ return false, nil
+ }
+
+ part++
+ // This upload exceeded maximum number of supported parts, error now.
+ if part > u.cfg.MaxUploadParts || part > MaxUploadParts {
+ var msg string
+ if part > u.cfg.MaxUploadParts {
+ msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ u.cfg.MaxUploadParts)
+ } else {
+ msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ MaxUploadParts)
+ }
+ return false, fmt.Errorf(msg)
+ }
+
+ return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+ defer u.wg.Done()
+ for {
+ data, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if u.geterr() == nil {
+ if err := u.send(data); err != nil {
+ u.seterr(err)
+ }
+ }
+
+ data.cleanup()
+ }
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+ params := &s3.UploadPartInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ Body: c.buf,
+ UploadId: &u.uploadID,
+ SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+ SSECustomerKey: u.in.SSECustomerKey,
+ PartNumber: c.num,
+ }
+
+ resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...)
+ if err != nil {
+ return err
+ }
+
+ n := c.num
+ completed := types.CompletedPart{ETag: resp.ETag, PartNumber: n}
+
+ u.m.Lock()
+ u.parts = append(u.parts, completed)
+ u.m.Unlock()
+
+ return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ u.err = e
+}
+
+// fail will abort the multipart unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+ if u.cfg.LeavePartsOnError {
+ return
+ }
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ }
+ _, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
+ if err != nil {
+ // TODO: Add logging
+ //logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+ _ = err
+ }
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+ if u.geterr() != nil {
+ u.fail()
+ return nil
+ }
+
+ // Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts)
+
+ params := &s3.CompleteMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ MultipartUpload: &types.CompletedMultipartUpload{Parts: u.parts},
+ }
+ resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
+ if err != nil {
+ u.seterr(err)
+ u.fail()
+ }
+
+ return resp
+}
+
+type readerAtSeeker interface {
+ io.ReaderAt
+ io.ReadSeeker
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
new file mode 100644
index 000000000..3df983a65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
@@ -0,0 +1,75 @@
+package manager
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
+type WriterReadFrom interface {
+ io.Writer
+ io.ReaderFrom
+}
+
+// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer
+type WriterReadFromProvider interface {
+ GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
+}
+
+type bufferedWriter interface {
+ WriterReadFrom
+ Flush() error
+ Reset(io.Writer)
+}
+
+type bufferedReadFrom struct {
+ bufferedWriter
+}
+
+func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
+ n, err := b.bufferedWriter.ReadFrom(r)
+ if flushErr := b.Flush(); flushErr != nil && err == nil {
+ err = flushErr
+ }
+ return n, err
+}
+
+// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
+// to manage allocation and reuse of *bufio.Writer structures.
+type PooledBufferedReadFromProvider struct {
+ pool sync.Pool
+}
+
+// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider
+// Size is used to control the size of the underlying *bufio.Writer created for
+// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+ if size < int(32*sdkio.KibiByte) {
+ size = int(64 * sdkio.KibiByte)
+ }
+
+ return &PooledBufferedReadFromProvider{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+ },
+ },
+ }
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
+// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom
+// has been completed in order to allow the reuse of the *bufio.Writer
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+ buffer := p.pool.Get().(*bufferedReadFrom)
+ buffer.Reset(writer)
+ r = buffer
+ cleanup = func() {
+ buffer.Reset(nil) // Reset to nil writer to release reference
+ p.pool.Put(buffer)
+ }
+ return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
new file mode 100644
index 000000000..938cd14c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
@@ -0,0 +1,112 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of a object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ if dst.Kind() == reflect.String {
+ dst.SetString(e.String())
+ } else {
+ dst.Set(reflect.New(e))
+ }
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if dst.Kind() != reflect.String && src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
new file mode 100644
index 000000000..bcfe51a2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
@@ -0,0 +1,33 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type the are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ // Special casing for strings as typed enumerations are string aliases
+ // but are not deep equal.
+ if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
+ return ra.String() == rb.String()
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
new file mode 100644
index 000000000..7e69bd5eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
@@ -0,0 +1,225 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ if dstVal.Kind() == reflect.String {
+ dstVal.SetString(srcVal.String())
+ } else {
+ dstVal.Set(srcVal)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
new file mode 100644
index 000000000..710eb432f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, " len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..92635a1bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
@@ -0,0 +1,183 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+ // Disable usage of HTTPS (TLS / SSL)
+ DisableHTTPS bool
+}
+
+// Partitions is a slice of partition
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+ if len(ps) == 0 {
+ return aws.Endpoint{}, fmt.Errorf("no partitions found")
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(region) {
+ continue
+ }
+
+ return ps[i].ResolveEndpoint(region, opts)
+ }
+
+ // fallback to first partition format to use when resolving the endpoint.
+ return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its' region endpoints.
+type Partition struct {
+ ID string
+ RegionRegex *regexp.Regexp
+ PartitionEndpoint string
+ IsRegionalized bool
+ Defaults Endpoint
+ Endpoints Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string) bool {
+ _, ok := p.Endpoints[region]
+ return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves and service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+ if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+ region = p.PartitionEndpoint
+ }
+
+ e, _ := p.endpointForRegion(region)
+
+ return e.resolve(p.ID, region, p.Defaults, options), nil
+}
+
+func (p Partition) endpointForRegion(region string) (Endpoint, bool) {
+ if !p.IsRegionalized {
+ return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint
+ }
+
+ if e, ok := p.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint, return
+ // blank that will be used for generic endpoint creation.
+ return Endpoint{}, false
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[string]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+ Region string
+ Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+ // True if the endpoint cannot be resolved for this partition/region/service
+ Unresolveable aws.Ternary
+
+ Hostname string
+ Protocols []string
+
+ CredentialScope CredentialScope
+
+ SignatureVersions []string `json:"signatureVersions"`
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint {
+ var merged Endpoint
+ merged.mergeIn(def)
+ merged.mergeIn(e)
+ e = merged
+
+ var u string
+ if e.Unresolveable != aws.TrueTernary {
+ // Only attempt to resolve the endpoint if it can be resolved.
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+ scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+ u = scheme + "://" + hostname
+ }
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+
+ return aws.Endpoint{
+ URL: u,
+ PartitionID: partition,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+ if other.Unresolveable != aws.UnknownTernary {
+ e.Unresolveable = other.Unresolveable
+ }
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+ if disableHTTPS {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
new file mode 100644
index 000000000..e83a99886
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// AST interface allows us to determine what kind of node we
+// are on and casting may not need to be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
new file mode 100644
index 000000000..0895d53cb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
new file mode 100644
index 000000000..0b76999ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next byte(s) is a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many bytes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
new file mode 100644
index 000000000..25ce0fe13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
@@ -0,0 +1,29 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
new file mode 100644
index 000000000..04345a54c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used to satisfy the Token interface
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
new file mode 100644
index 000000000..0f278d55e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
@@ -0,0 +1,22 @@
+package ini
+
+import "fmt"
+
+// UnableToReadFile is an error indicating that a ini file could not be read
+type UnableToReadFile struct {
+ Err error
+}
+
+// Error returns an error message and the underlying error message if present
+func (e *UnableToReadFile) Error() string {
+ base := "unable to read file"
+ if e.Err == nil {
+ return base
+ }
+ return fmt.Sprintf("%s: %v", base, e.Err)
+}
+
+// Unwrap returns the underlying error
+func (e *UnableToReadFile) Unwrap() error {
+ return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
new file mode 100644
index 000000000..91ba2a59d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return a LHS value in the equal expr
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
new file mode 100644
index 000000000..8d462f77e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
new file mode 100644
index 000000000..4a80eb9a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
@@ -0,0 +1,49 @@
+package ini
+
+import (
+ "io"
+ "os"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, &UnableToReadFile{Err: err}
+ }
+ defer f.Close()
+
+ return Parse(f, path)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader, path string) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor(path)
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor("")
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
new file mode 100644
index 000000000..abf1fb036
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
@@ -0,0 +1,157 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+)
+
+// TokenType represents the various different tokens types
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, &UnableToReadFile{Err: err}
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token indicates a metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw return the raw runes that were consumed
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
new file mode 100644
index 000000000..643fbe467
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
@@ -0,0 +1,356 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: {
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: {
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: {
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: {
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: {
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: {
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ // being in a skip state with no tokens will break out of
+ // the parse loop since there is nothing left to process.
+ if len(tokens) == 0 {
+ break loop
+ }
+ // if should skip is true, we skip the tokens until should skip is set to false.
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+ // Finished parsing. Push what should be the last
+ // statement to the stack. If there is anything left
+ // on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ k = trimSpaces(k)
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+ // assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExpr:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stack.Push(k)
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ k.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+ // If OpenScopeState is not at the start, we must mark the previous ast as complete
+ //
+ // for example: if previous ast was a skip statement;
+ // we should mark it as complete before we create a new statement
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ k = trimSpaces(k)
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+ k.Kind, tok.Type()))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+ return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
+ }
+
+ // returns a sublist which exludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
new file mode 100644
index 000000000..91b379986
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
@@ -0,0 +1,334 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether not the leading characters in
+// a byte slice is a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+ negativeIndex++
+
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return false
+ }
+ helper.Determine(b[i])
+ needDigit = true
+ continue
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ negativeIndex = 0
+ needDigit = true
+ continue
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ needDigit = true
+ if i == 0 {
+ return false
+ }
+
+ fallthrough
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ needDigit = true
+ continue
+ }
+
+ if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+ return !needDigit
+ }
+
+ if !helper.CorrectByte(b[i]) {
+ return false
+ }
+ needDigit = false
+ }
+
+ return !needDigit
+}
+
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+ // TODO: should probably return an error
+ return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+ v.Type = StringType
+ v.str = string(raw)
+ err = nil
+ }
+
+ return v, err
+}
+
+// NewStringValue returns a Value type generated using a string input.
+func NewStringValue(str string) (Value, error) {
+ return newValue(StringType, 10, []rune(str))
+}
+
+// NewIntValue returns a Value type generated using an int64 input.
+func NewIntValue(i int64) (Value, error) {
+ return newValue(IntegerType, 10, []rune{rune(i)})
+}
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+ v.Type = StringType
+ r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+ v.raw = append(v.raw, tok.Raw()...)
+ } else {
+ v.raw = append(v.raw, r...)
+ }
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+ n, err = getStringValue(b)
+ if err != nil {
+ return token, n, err
+ }
+
+ token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+ var base int
+ base, n, err = getNumericalValue(b)
+ if err != nil {
+ return token, 0, err
+ }
+
+ value := b[:n]
+ vType := IntegerType
+ if contains(value, '.') || hasExponent(value) {
+ vType = DecimalType
+ }
+ token = newToken(TokenLit, value, vType)
+ token.base = base
+ } else if isBoolValue(b) {
+ n, err = getBoolValue(b)
+
+ token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+ n, err = getValue(b)
+ token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+ return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+ // preserve all characters in the quotes
+ return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+ if runes[i] == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+ return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+ if v1[i] != v2[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
new file mode 100644
index 000000000..e52ac399f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
new file mode 100644
index 000000000..a45c0bc56
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot just simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
new file mode 100644
index 000000000..8a84c7cbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
new file mode 100644
index 000000000..30ae0b8f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
@@ -0,0 +1,19 @@
+package ini
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+func (err *ParseError) Error() string {
+ return err.msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
new file mode 100644
index 000000000..7f01cf7c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list which is the list of ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
new file mode 100644
index 000000000..f82095ba2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
new file mode 100644
index 000000000..07e90876a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+ // should skip state will be modified only if previous token was new line (NL);
+ // and the current token is not WhiteSpace (WS).
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+ s.Continue()
+ return false
+ }
+
+ s.prevTok = tok
+ return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+ s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
new file mode 100644
index 000000000..ba0af01b5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini defintion.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
new file mode 100644
index 000000000..305999d29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue will return a quoted string and the amount
+// of bytes read
+//
+// an error will be returned if the string is not properly formatted
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue will return a boolean and the amount
+// of bytes read
+//
+// an error will be returned if the boolean is not of a correct
+// value
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue will return a numerical string, the amount
+// of bytes read, and the base of the number
+//
+// an error will be returned if the number is not of a correct
+// value
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not something is an integer
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber will return a negative number from a
+// byte slice. This will iterate through all characters until
+// a non-digit has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+ case 't': // table
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
new file mode 100644
index 000000000..cfd6fe4d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
@@ -0,0 +1,268 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+
+ // scope is the profile which is being visited
+ scope string
+
+ // path is the file path which the visitor is visiting
+ path string
+
+ // Sections defines list of the profile section
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath
+// which points to the file it is visiting.
+func NewDefaultVisitor(filepath string) *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ path: filepath,
+ }
+}
+
+// VisitExpr visits expressions...
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+ if t.SourceFile == nil {
+ t.SourceFile = make(map[string]string, 0)
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ if rhs.Root.Type() != TokenLit {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ // lower case key to standardize
+ k := strings.ToLower(key)
+
+ // identify if the section already had this key, append log on section
+ if t.Has(k) {
+ t.Logs = append(t.Logs,
+ fmt.Sprintf("For profile: %v, overriding %v value, "+
+ "with a %v value found in a duplicate profile defined later in the same file %v. \n",
+ t.Name, k, k, v.path))
+ }
+
+ // assign the value
+ t.values[k] = val
+ // update the source file path for region
+ t.SourceFile[k] = v.path
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits statements...
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+
+ // trim start and end space
+ name = strings.TrimSpace(name)
+
+ // if has prefix "profile " + [ws+] + "profile-name",
+ // we standardize by removing the [ws+] between prefix and profile-name.
+ if strings.HasPrefix(name, "profile ") {
+ names := strings.SplitN(name, " ", 2)
+ name = names[0] + " " + strings.TrimLeft(names[1], " ")
+ }
+
+ // lower casing name to handle duplicates correctly.
+ name = strings.ToLower(name)
+ // attach profile name on section
+ if !v.Sections.HasSection(name) {
+ v.Sections.container[name] = NewSection(name)
+ }
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// NewSections returns empty ini Sections
+func NewSections() Sections {
+ return Sections{
+ container: make(map[string]Section, 0),
+ }
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// HasSection denotes if Sections consist of a section with
+// provided name.
+func (t Sections) HasSection(p string) bool {
+ _, ok := t.container[p]
+ return ok
+}
+
+// SetSection sets a section value for provided section name.
+func (t Sections) SetSection(p string, v Section) Sections {
+ t.container[p] = v
+ return t
+}
+
+// DeleteSection deletes a section entry/value for provided section name./
+func (t Sections) DeleteSection(p string) {
+ delete(t.container, p)
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represent
+// a sectioned entry in a configuration file.
+type Section struct {
+ // Name is the Section profile name
+ Name string
+
+ // values are the values within parsed profile
+ values values
+
+ // Errors is the list of errors
+ Errors []error
+
+ // Logs is the list of logs
+ Logs []string
+
+ // SourceFile is the INI Source file from where this section
+ // was retrieved. They key is the property, value is the
+ // source file the property was retrieved from.
+ SourceFile map[string]string
+}
+
+// NewSection returns an initialize section for the name
+func NewSection(name string) Section {
+ return Section{
+ Name: name,
+ values: values{},
+ SourceFile: map[string]string{},
+ }
+}
+
+// UpdateSourceFile updates source file for a property to provided filepath.
+func (t Section) UpdateSourceFile(property string, filepath string) {
+ t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates value for a provided key with provided value
+func (t Section) UpdateValue(k string, v Value) error {
+ t.values[k] = v
+ return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will returned what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
new file mode 100644
index 000000000..99915f7f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using the v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
new file mode 100644
index 000000000..7ffb4ae06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as a space or tab.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
new file mode 100644
index 000000000..9791ea590
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
@@ -0,0 +1,33 @@
+package rand
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+func init() {
+ Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+var floatMaxBigInt = big.NewInt(1 << 53)
+
+// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0).
+func Float64(reader io.Reader) (float64, error) {
+ bi, err := rand.Int(reader, floatMaxBigInt)
+ if err != nil {
+ return 0, fmt.Errorf("failed to read random value, %v", err)
+ }
+
+ return float64(bi.Int64()) / (1 << 53), nil
+}
+
+// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
+// source.
+func CryptoRandFloat64() (float64, error) {
+ return Float64(rand.Reader)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
new file mode 100644
index 000000000..2b42cbe64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
@@ -0,0 +1,9 @@
+package sdk
+
+// Invalidator provides access to a type's invalidate method to make it
+// invalidate it cache.
+//
+// e.g aws.SafeCredentialsProvider's Invalidate method.
+type Invalidator interface {
+ Invalidate()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
new file mode 100644
index 000000000..8e8dabad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
@@ -0,0 +1,74 @@
+package sdk
+
+import (
+ "context"
+ "time"
+)
+
+func init() {
+ NowTime = time.Now
+ Sleep = time.Sleep
+ SleepWithContext = sleepWithContext
+}
+
+// NowTime is a value for getting the current time. This value can be overridden
+// for testing mocking out current time.
+var NowTime func() time.Time
+
+// Sleep is a value for sleeping for a duration. This value can be overridden
+// for testing and mocking out sleep duration.
+var Sleep func(time.Duration)
+
+// SleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Which ever happens first. If the context is canceled the Context's
+// error will be returned.
+//
+// This value can be overridden for testing and mocking out sleep duration.
+var SleepWithContext func(context.Context, time.Duration) error
+
+// sleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Which ever happens first. If the context is canceled the
+// Context's error will be returned.
+func sleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
+
+// noOpSleepWithContext does nothing, returns immediately.
+func noOpSleepWithContext(context.Context, time.Duration) error {
+ return nil
+}
+
+func noOpSleep(time.Duration) {}
+
+// TestingUseNopSleep is a utility for disabling sleep across the SDK for
+// testing.
+func TestingUseNopSleep() func() {
+ SleepWithContext = noOpSleepWithContext
+ Sleep = noOpSleep
+
+ return func() {
+ SleepWithContext = sleepWithContext
+ Sleep = time.Sleep
+ }
+}
+
+// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time
+// for testing purposes.
+func TestingUseReferenceTime(referenceTime time.Time) func() {
+ NowTime = func() time.Time {
+ return referenceTime
+ }
+ return func() {
+ NowTime = time.Now
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
new file mode 100644
index 000000000..6c443988b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/ringbuffer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/ringbuffer.go
new file mode 100644
index 000000000..1389bf0bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/ringbuffer.go
@@ -0,0 +1,72 @@
+package sdkio
+
+import "io"
+
+// RingBuffer struct satisfies io.ReadWrite interface.
+//
+// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+ slice []byte
+ start int
+ end int
+ size int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+ ringBuf := RingBuffer{
+ slice: slice,
+ }
+ return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with an error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+ for _, b := range p {
+ // check if end points to invalid index, we need to circle back
+ if r.end == len(r.slice) {
+ r.end = 0
+ }
+ // check if start points to invalid index, we need to circle back
+ if r.start == len(r.slice) {
+ r.start = 0
+ }
+ // if ring buffer is filled, increment the start index
+ if r.size == len(r.slice) {
+ r.size--
+ r.start++
+ }
+
+ r.slice[r.end] = b
+ r.end++
+ r.size++
+ }
+ return r.size, nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with Error encountered while reading
+func (r *RingBuffer) Read(p []byte) (int, error) {
+ // readCount keeps track of the number of bytes read
+ var readCount int
+ for j := 0; j < len(p); j++ {
+ // if ring buffer is empty or completely read
+ // return EOF error.
+ if r.size == 0 {
+ return readCount, io.EOF
+ }
+
+ p[j] = r.slice[r.start]
+ readCount++
+ // increment the start pointer for ring buffer
+ r.start++
+ // decrement the size of ring buffer
+ r.size--
+
+ if r.start == len(r.slice) {
+ r.start = 0
+ }
+ }
+ return readCount, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
new file mode 100644
index 000000000..d008ae27c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+ "strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+ return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..14ad0c589
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,120 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import "sync"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // forgotten indicates whether Forget was called with this call's key
+ // while the call was still in flight.
+ forgotten bool
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ c.val, c.err = fn()
+ c.wg.Done()
+
+ g.mu.Lock()
+ if !c.forgotten {
+ delete(g.m, key)
+ }
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ g.mu.Unlock()
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ if c, ok := g.m[key]; ok {
+ c.forgotten = true
+ }
+ delete(g.m, key)
+ g.mu.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
new file mode 100644
index 000000000..5d69db5f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
@@ -0,0 +1,13 @@
+package timeconv
+
+import "time"
+
+// FloatSecondsDur converts a fractional seconds to duration.
+func FloatSecondsDur(v float64) time.Duration {
+ return time.Duration(v * float64(time.Second))
+}
+
+// DurSecondsFloat converts a duration into fractional seconds.
+func DurSecondsFloat(d time.Duration) float64 {
+ return float64(d) / float64(time.Second)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
new file mode 100644
index 000000000..3f451fc9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
@@ -0,0 +1,176 @@
+package acceptencoding
+
+import (
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const acceptEncodingHeaderKey = "Accept-Encoding"
+const contentEncodingHeaderKey = "Content-Encoding"
+
+// AddAcceptEncodingGzipOptions provides the options for the
+// AddAcceptEncodingGzip middleware setup.
+type AddAcceptEncodingGzipOptions struct {
+ Enable bool
+}
+
+// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP
+// middleware to the operation stack. This allows checksums to be correctly
+// computed without disabling GZIP support.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+ if options.Enable {
+ if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that will
+// disable the underlying http client automatically enabling for gzip
+// decompress content-encoding support.
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+ return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, &smithy.SerializationError{
+ Err: fmt.Errorf("unknown request type %T", input.Request),
+ }
+ }
+
+ // Explicitly disable gzip support by requesting the identity encoding;
+ // this prevents the http client from auto extracting zipped content.
+ req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+ return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, &smithy.SerializationError{
+ Err: fmt.Errorf("unknown request type %T", input.Request),
+ }
+ }
+
+ // Explicitly enable gzip support, this will prevent the http client from
+ // auto extracting the zipped content.
+ req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+ return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface.
+func (*DecompressGzip) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ output, metadata, err = next.HandleDeserialize(ctx, input)
+ if err != nil {
+ return output, metadata, err
+ }
+
+ resp, ok := output.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return output, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+ }
+ }
+ if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" {
+ return output, metadata, err
+ }
+
+ // Clear content length since it will no longer be valid once the response
+ // body is decompressed.
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+
+ resp.Body = wrapGzipReader(resp.Body)
+
+ return output, metadata, err
+}
+
+type gzipReader struct {
+ reader io.ReadCloser
+ gzip *gzip.Reader
+}
+
+func wrapGzipReader(reader io.ReadCloser) *gzipReader {
+ return &gzipReader{
+ reader: reader,
+ }
+}
+
+// Read wraps the gzip reader around the underlying io.Reader to extract the
+// response bytes on the fly.
+func (g *gzipReader) Read(b []byte) (n int, err error) {
+ if g.gzip == nil {
+ g.gzip, err = gzip.NewReader(g.reader)
+ if err != nil {
+ g.gzip = nil // ensure uninitialized gzip value isn't used in close.
+ return 0, fmt.Errorf("failed to decompress gzip response, %w", err)
+ }
+ }
+
+ return g.gzip.Read(b)
+}
+
+func (g *gzipReader) Close() error {
+ if g.gzip == nil {
+ return nil
+ }
+
+ if err := g.gzip.Close(); err != nil {
+ g.reader.Close()
+ return fmt.Errorf("failed to decompress gzip response, %w", err)
+ }
+
+ return g.reader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
new file mode 100644
index 000000000..3ffac0131
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
@@ -0,0 +1,23 @@
+/*
+Package acceptencoding provides customizations associated with Accept Encoding Header.
+
+Accept encoding gzip
+
+The Go HTTP client automatically supports accept-encoding and content-encoding
+gzip by default. This default behavior is not desired by the SDK, and prevents
+validating the response body's checksum. To prevent this the SDK must manually
+control usage of content-encoding gzip.
+
+To control content-encoding, the SDK must always set the `Accept-Encoding`
+header to a value. This prevents the HTTP client from using gzip automatically.
+When gzip is enabled on the API client, the SDK's customization will control
+decompressing the gzip data in order to not break the checksum validation. When
+gzip is disabled, the API client will disable gzip, preventing the HTTP
+client's default behavior.
+
+An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using
+the below middleware. If present, the option can be used to enable the SDK's automatic
+decompression of gzip-encoded responses.
+
+*/
+package acceptencoding
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.mod
new file mode 100644
index 000000000..fb67aab70
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.mod
@@ -0,0 +1,5 @@
+module github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
+
+go 1.15
+
+require github.com/aws/smithy-go v1.2.0
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.sum
new file mode 100644
index 000000000..74b970643
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go.sum
@@ -0,0 +1,6 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
new file mode 100644
index 000000000..cc919701a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
@@ -0,0 +1,48 @@
+package presignedurl
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// WithIsPresigning adds the isPresigning sentinel value to a context to signal
+// that the middleware stack is using the presign flow.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func WithIsPresigning(ctx context.Context) context.Context {
+ return middleware.WithStackValue(ctx, isPresigningKey{}, true)
+}
+
+// GetIsPresigning returns if the context contains the isPresigning sentinel
+// value for presigning flows.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetIsPresigning(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool)
+ return v
+}
+
+type isPresigningKey struct{}
+
+// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that
+// will update the stack's context to be flagged as being invoked for the
+// purpose of presigning.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
+}
+
+type asIsPresigningMiddleware struct{}
+
+func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
+
+func (asIsPresigningMiddleware) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = WithIsPresigning(ctx)
+ return next.HandleInitialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
new file mode 100644
index 000000000..1b85375cf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
@@ -0,0 +1,3 @@
+// Package presignedurl provides the customizations for API clients to fill in
+// presigned URLs into input parameters.
+package presignedurl
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.mod
new file mode 100644
index 000000000..e0517203d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.mod
@@ -0,0 +1,11 @@
+module github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../../
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 000000000..1e2f5c812
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+ "context"
+ "fmt"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters in to a
+// presigned URL.
+type URLPresigner interface {
+ // PresignURL presigns a URL.
+ PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides an collection of accessor to for retrieving and
+// setting the values needed to PresignedURL generation
+type ParameterAccessor struct {
+ // GetPresignedURL accessor points to a function that retrieves a presigned url if present
+ GetPresignedURL func(interface{}) (string, bool, error)
+
+ // GetSourceRegion accessor points to a function that retrieves source region for presigned url
+ GetSourceRegion func(interface{}) (string, bool, error)
+
+ // CopyInput accessor points to a function that takes in an input, and returns a copy.
+ CopyInput func(interface{}) (interface{}, error)
+
+ // SetDestinationRegion accessor points to a function that sets destination region on api input struct
+ SetDestinationRegion func(interface{}, string) error
+
+ // SetPresignedURL accessor points to a function that sets presigned url on api input struct
+ SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+ // Accessor are the parameter accessors used by this middleware
+ Accessor ParameterAccessor
+
+ // Presigner is the URLPresigner used by the middleware
+ Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Initialize.Remove((*presign)(nil).ID())
+ return err
+}
+
+type presign struct {
+ options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+ ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ // If PresignedURL is already set ignore middleware.
+ if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if ok {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ // If have source region is not set ignore middleware.
+ srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if !ok || len(srcRegion) == 0 {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ // Create a copy of the original input so the destination region value can
+ // be added. This ensures that value does not leak into the original
+ // request parameters.
+ paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Destination region is the API client's configured region.
+ dstRegion := awsmiddleware.GetRegion(ctx)
+ if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Update the original input with the presigned URL value.
+ if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ return next.HandleInitialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 000000000..93e358a4a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// AccessPointARN provides representation
+type AccessPointARN struct {
+ arn.ARN
+ AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+ return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint
+//
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+ if len(a.Region) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+ }
+ if len(a.AccountID) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+ if len(resParts) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ resID := resParts[0]
+ if len(strings.TrimSpace(resID)) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+
+ return AccessPointARN{
+ ARN: a,
+ AccessPointName: resID,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
new file mode 100644
index 000000000..c61534646
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
@@ -0,0 +1,69 @@
+package arn
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+ GetARN() arn.ARN
+ String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) {
+ if len(a.Partition) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
+ }
+
+ if a.Service != "s3" && a.Service != "s3-outposts" {
+ return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
+ }
+ if len(a.Resource) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
+ }
+
+ return resParser(a)
+}
+
+// SplitResource splits the resource components by the ARN resource delimiters.
+func SplitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+ idx := strings.IndexAny(v[offset:], "/:")
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
+// IsARN returns whether the given string is an ARN
+func IsARN(s string) bool {
+ return arn.IsARN(s)
+}
+
+// InvalidARNError provides the error for an invalid ARN error.
+type InvalidARNError struct {
+ ARN arn.ARN
+ Reason string
+}
+
+// Error returns a string denoting the occurred InvalidARNError
+func (e InvalidARNError) Error() string {
+ return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
new file mode 100644
index 000000000..66a0291fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
@@ -0,0 +1,126 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// OutpostARN interface that should be satisfied by outpost ARNs
+type OutpostARN interface {
+ Resource
+ GetOutpostID() string
+}
+
+// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format
+// and return a specific OutpostARN type
+//
+// Currently supported outpost ARN formats:
+// * Outpost AccessPoint ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+// * Outpost Bucket ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
+//
+// Other outpost ARN formats may be supported and added in the future.
+//
+func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
+ if len(a.Region) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "region not set"}
+ }
+
+ if len(a.AccountID) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+
+ // verify if outpost id is present and valid
+ if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ // verify possible resource type exists
+ if len(resParts) < 3 {
+ return nil, InvalidARNError{
+ ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
+ }
+ }
+
+ // Since we know this is a OutpostARN fetch outpostID
+ outpostID := strings.TrimSpace(resParts[0])
+
+ switch resParts[1] {
+ case "accesspoint":
+ accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return OutpostAccessPointARN{}, err
+ }
+ return OutpostAccessPointARN{
+ AccessPointARN: accesspointARN,
+ OutpostID: outpostID,
+ }, nil
+
+ case "bucket":
+ bucketName, err := parseBucketResource(a, resParts[2:])
+ if err != nil {
+ return nil, err
+ }
+ return OutpostBucketARN{
+ ARN: a,
+ BucketName: bucketName,
+ OutpostID: outpostID,
+ }, nil
+
+ default:
+ return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
+ }
+}
+
+// OutpostAccessPointARN represents outpost access point ARN.
+type OutpostAccessPointARN struct {
+ AccessPointARN
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost access point arn
+func (o OutpostAccessPointARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
+type OutpostBucketARN struct {
+ arn.ARN
+ BucketName string
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost bucket arn
+func (o OutpostBucketARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// GetARN retrives the base ARN from outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+ return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+//
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+ if len(resParts) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ bucketName = strings.TrimSpace(resParts[0])
+ if len(bucketName) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ return bucketName, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
new file mode 100644
index 000000000..b51532085
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
@@ -0,0 +1,73 @@
+package s3shared
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// ARNLookup is the initial middleware that looks up if an arn is provided.
+// This middleware is responsible for fetching ARN from a arnable field, and registering the ARN on
+// middleware context. This middleware must be executed before input validation step or any other
+// arn processing middleware.
+type ARNLookup struct {
+
+ // GetARNValue takes in a input interface and returns a ptr to string and a bool
+ GetARNValue func(interface{}) (*string, bool)
+}
+
+// ID for the middleware
+func (m *ARNLookup) ID() string {
+ return "S3Shared:ARNLookup"
+}
+
+// HandleInitialize handles the behavior of this initialize step
+func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ // check if GetARNValue is supported
+ if m.GetARNValue == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ // check is input resource is an ARN; if not go to next
+ v, ok := m.GetARNValue(in.Parameters)
+ if !ok || v == nil || !arn.IsARN(*v) {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ // if ARN process ResourceRequest and put it on ctx
+ av, err := arn.Parse(*v)
+ if err != nil {
+ return out, metadata, fmt.Errorf("error parsing arn: %w", err)
+ }
+ // set parsed arn on context
+ ctx = setARNResourceOnContext(ctx, av)
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// arnResourceKey is the key set on context used to identify, retrive an ARN resource
+// if present on the context.
+type arnResourceKey struct{}
+
+// SetARNResourceOnContext sets the S3 ARN on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context {
+ return middleware.WithStackValue(ctx, arnResourceKey{}, value)
+}
+
+// GetARNResourceFromContext returns an ARN from context and a bool indicating
+// presence of ARN on ctx.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) {
+ v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN)
+ return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
new file mode 100644
index 000000000..8926e5970
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
@@ -0,0 +1,22 @@
+package config
+
+import "context"
+
+// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion
+type UseARNRegionProvider interface {
+ GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error)
+}
+
+// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(UseARNRegionProvider); ok {
+ value, found, err = p.GetS3UseARNRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
new file mode 100644
index 000000000..46f6197e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
@@ -0,0 +1,170 @@
+package s3shared
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// TODO: fix these error statements to be relevant to v2 sdk
+
+const (
+ invalidARNErrorErrCode = "InvalidARNError"
+ configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for Invalid ARN
+type InvalidARNError struct {
+ message string
+ resource arn.Resource
+ origErr error
+}
+
+// Error returns the InvalidARN error string
+func (e InvalidARNError) Error() string {
+ var extra string
+ if e.resource != nil {
+ extra = "ARN: " + e.resource.String()
+ }
+ msg := invalidARNErrorErrCode + " : " + e.message
+ if extra != "" {
+ msg = msg + "\n\t" + extra
+ }
+
+ return msg
+}
+
+// OrigErr is the original error wrapped by Invalid ARN Error
+func (e InvalidARNError) Unwrap() error {
+ return e.origErr
+}
+
+// NewInvalidARNError denotes invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "invalid ARN",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for the target ARN partition",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithFIPSError ARN not supported for FIPS region
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for FIPS region",
+ resource: resource,
+ origErr: err,
+ }
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+ message string
+ resource arn.Resource
+ clientPartitionID string
+ clientRegion string
+ origErr error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+ extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+ e.resource, e.clientPartitionID, e.clientRegion)
+
+ msg := configurationErrorErrCode + " : " + e.message
+ if extra != "" {
+ msg = msg + "\n\t" + extra
+ }
+ return msg
+}
+
+// OrigErr is the original error wrapped by Configuration Error
+func (e ConfigurationError) Unwrap() error {
+ return e.origErr
+}
+
+// NewClientPartitionMismatchError stub
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client partition does not match provided ARN partition",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientRegionMismatchError denotes cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client region does not match provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "endpoint resolver failed to find an endpoint for the provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for fips but cross-region resource ARN provided",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Accelerate but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Dual-stack but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.mod
new file mode 100644
index 000000000..be2a32480
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.mod
@@ -0,0 +1,10 @@
+module github.com/aws/aws-sdk-go-v2/service/internal/s3shared
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/smithy-go v1.2.0
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../../
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
new file mode 100644
index 000000000..85b60d2a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
@@ -0,0 +1,29 @@
+package s3shared
+
+import (
+ "github.com/aws/smithy-go/middleware"
+)
+
+// hostID is used to retrieve host id from response metadata
+type hostID struct {
+}
+
+// SetHostIDMetadata sets the provided host id over middleware metadata
+func SetHostIDMetadata(metadata *middleware.Metadata, id string) {
+ metadata.Set(hostID{}, id)
+}
+
+// GetHostIDMetadata retrieves the host id from middleware metadata
+// returns host id as string along with a boolean indicating presence of
+// hostId on middleware metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+ if !metadata.Has(hostID{}) {
+ return "", false
+ }
+
+ v, ok := metadata.Get(hostID{}).(string)
+ if !ok {
+ return "", true
+ }
+ return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
new file mode 100644
index 000000000..f02604cb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
@@ -0,0 +1,28 @@
+package s3shared
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// clonedInputKey used to denote if request input was cloned.
+type clonedInputKey struct{}
+
+// SetClonedInputKey sets a key on context to denote input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetClonedInputKey(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, clonedInputKey{}, value)
+}
+
+// IsClonedInput retrieves if context key for cloned input was set.
+// If set, we can infer that the reuqest input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func IsClonedInput(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool)
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
new file mode 100644
index 000000000..f52f2f11e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
@@ -0,0 +1,52 @@
+package s3shared
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const metadataRetrieverID = "S3MetadataRetriever"
+
+// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware
+func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+ // add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as
+ // host id, request id from response header returned by operation deserializers
+ return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type metadataRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *metadataRetriever) ID() string {
+ return metadataRetrieverID
+}
+
+func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // check for header for Request id
+ if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 {
+ // set reqID on metadata for successful responses.
+ awsmiddleware.SetRequestIDMetadata(&metadata, v)
+ }
+
+ // look up host-id
+ if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 {
+ // set reqID on metadata for successful responses.
+ SetHostIDMetadata(&metadata, v)
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
new file mode 100644
index 000000000..2d8a95358
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
@@ -0,0 +1,75 @@
+package s3shared
+
+import (
+ "fmt"
+ "strings"
+
+ awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ResourceRequest represents an ARN resource and api request metadata
+type ResourceRequest struct {
+ Resource arn.Resource
+ // RequestRegion is the region configured on the request config
+ RequestRegion string
+
+ // SigningRegion is the signing region resolved for the request
+ SigningRegion string
+
+ // PartitionID is the resolved partition id for the provided request region
+ PartitionID string
+
+ // UseARNRegion indicates if client should use the region provided in an ARN resource
+ UseARNRegion bool
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+ return r.Resource.GetARN()
+}
+
+// UseFips returns true if request config region is FIPS region.
+func (r ResourceRequest) UseFips() bool {
+ return IsFIPS(r.RequestRegion)
+}
+
+// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+ return IsFIPS(r.ARN().Region)
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+ return r.UseARNRegion
+}
+
+// IsCrossPartition returns true if request is configured for region of another partition, than
+// the partition that resource ARN region resolves to. IsCrossPartition will not return an error,
+// if request is not configured with a specific partition id. This might happen if customer provides
+// custom endpoint url, but does not associate a partition id with it.
+func (r ResourceRequest) IsCrossPartition() (bool, error) {
+ rv := r.PartitionID
+ if len(rv) == 0 {
+ return false, nil
+ }
+
+ av := r.Resource.GetARN().Partition
+ if len(av) == 0 {
+ return false, fmt.Errorf("no partition id for provided ARN")
+ }
+
+ return !strings.EqualFold(rv, av), nil
+}
+
+// IsCrossRegion returns true if request signing region is not same as arn region
+func (r ResourceRequest) IsCrossRegion() bool {
+ v := r.SigningRegion
+ return !strings.EqualFold(v, r.Resource.GetARN().Region)
+}
+
+// IsFIPS returns true if region is a fips region
+func IsFIPS(clientRegion string) bool {
+ return (strings.HasPrefix(clientRegion, "fips-") ||
+ strings.HasSuffix(clientRegion, "-fips"))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
new file mode 100644
index 000000000..857336243
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
@@ -0,0 +1,33 @@
+package s3shared
+
+import (
+ "errors"
+ "fmt"
+
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+ *awshttp.ResponseError
+
+ // HostID associated with response error
+ HostID string
+}
+
+// ServiceHostID returns the host id associated with Response Error
+func (e *ResponseError) ServiceHostID() string { return e.HostID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "https response error StatusCode: %d, RequestID: %s, HostID: %s, %v",
+ e.Response.StatusCode, e.RequestID, e.HostID, e.Err)
+}
+
+// As populates target and returns true if the type of target is a error type that
+// the ResponseError embeds, (e.g.S3 HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+ return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
new file mode 100644
index 000000000..543576245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
@@ -0,0 +1,60 @@
+package s3shared
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+ // add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before)
+}
+
+type errorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *errorWrapper) ID() string {
+ return "ResponseErrorWrapper"
+}
+
+func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err == nil {
+ // Nothing to do when there is no error.
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // look for request id in metadata
+ reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+ // look for host id in metadata
+ hostID, _ := GetHostIDMetadata(metadata)
+
+ // Wrap the returned smithy error with the request id retrieved from the metadata
+ err = &ResponseError{
+ ResponseError: &awshttp.ResponseError{
+ ResponseError: &smithyhttp.ResponseError{
+ Response: resp,
+ Err: err,
+ },
+ RequestID: reqID,
+ },
+ HostID: hostID,
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
new file mode 100644
index 000000000..1f811a58a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
@@ -0,0 +1,76 @@
+package s3shared
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+)
+
+// EnableDualstack represents middleware struct for enabling dualstack support
+type EnableDualstack struct {
+ // UseDualstack indicates if dualstack endpoint resolving is to be enabled
+ UseDualstack bool
+
+ // DefaultServiceID is the service id prefix used in endpoint resolving
+ // by default service-id is 's3' and 's3-control' for service s3, s3control.
+ DefaultServiceID string
+}
+
+// ID returns the middleware ID.
+func (*EnableDualstack) ID() string {
+ return "EnableDualstack"
+}
+
+// HandleSerialize handles serializer middleware behavior when middleware is executed
+func (u *EnableDualstack) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+ // check for host name immutable property
+ if smithyhttp.GetHostnameImmutable(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ serviceID := awsmiddle.GetServiceID(ctx)
+
+ // s3-control may be represented as `S3 Control` as in model
+ if serviceID == "S3 Control" {
+ serviceID = "s3-control"
+ }
+
+ if len(serviceID) == 0 {
+ // default service id
+ serviceID = u.DefaultServiceID
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ if u.UseDualstack {
+ parts := strings.Split(req.URL.Host, ".")
+ if len(parts) < 3 {
+ return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host)
+ }
+
+ for i := 0; i+1 < len(parts); i++ {
+ if strings.EqualFold(parts[i], serviceID) {
+ parts[i] = parts[i] + ".dualstack"
+ break
+ }
+ }
+
+ // construct the url host
+ req.URL.Host = strings.Join(parts, ".")
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
new file mode 100644
index 000000000..65fd07e00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
@@ -0,0 +1,89 @@
+package s3shared
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+}
+
+// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body
+func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+ var errComponents ErrorComponents
+ if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+ }
+ return errComponents, nil
+}
+
+// GetWrappedErrorResponseComponents returns the error fields from an xml error response body
+// in which error code, and message are wrapped by a tag
+func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+ var errComponents struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+ }
+
+ if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+ }
+
+ return ErrorComponents{
+ Code: errComponents.Code,
+ Message: errComponents.Message,
+ RequestID: errComponents.RequestID,
+ HostID: errComponents.HostID,
+ }, nil
+}
+
+// GetErrorResponseComponents retrieves error components according to passed in options
+func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) {
+ var errComponents ErrorComponents
+ var err error
+
+ if options.IsWrappedWithErrorTag {
+ errComponents, err = GetWrappedErrorResponseComponents(r)
+ } else {
+ errComponents, err = GetUnwrappedErrorResponseComponents(r)
+ }
+
+ if err != nil {
+ return ErrorComponents{}, err
+ }
+
+ // If an error code or message is not retrieved, it is derived from the http status code
+ // eg, for S3 service, we derive err code and message, if none is found
+ if options.UseStatusCode && len(errComponents.Code) == 0 &&
+ len(errComponents.Message) == 0 {
+ // derive code and message from status code
+ statusText := http.StatusText(options.StatusCode)
+ errComponents.Code = strings.Replace(statusText, " ", "", -1)
+ errComponents.Message = statusText
+ }
+ return errComponents, nil
+}
+
+// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service
+type ErrorResponseDeserializerOptions struct {
+ // UseStatusCode denotes if status code should be used to retrieve error code, msg
+ UseStatusCode bool
+
+ // StatusCode is status code of error response
+ StatusCode int
+
+ //IsWrappedWithErrorTag represents if error response's code, msg is wrapped within an
+ // additional tag
+ IsWrappedWithErrorTag bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
new file mode 100644
index 000000000..3fad5be9d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
@@ -0,0 +1,447 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "time"
+)
+
+const ServiceID = "S3"
+const ServiceAPIVersion = "2006-03-01"
+
+// Client provides the API client to make operations call for Amazon Simple Storage
+// Service.
+type Client struct {
+ options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveDefaultEndpointConfiguration(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ EndpointResolver EndpointResolver
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // Allows you to enable arn region support for the service.
+ UseARNRegion bool
+
+ // Allows you to enable S3 Accelerate feature. All operations compatible with S3
+ // Accelerate will use the accelerate endpoint for requests. Requests not
+ // compatible will fall back to normal S3 requests. The bucket must be enabled for
+ // accelerate to be used with S3 client with accelerate enabled. If the bucket is
+ // not enabled for accelerate an error will be returned. The bucket name must be
+ // DNS compatible to work with accelerate.
+ UseAccelerate bool
+
+ // Allows you to enable Dualstack endpoint support for the service.
+ UseDualstack bool
+
+ // Allows you to enable the client to use path-style addressing, i.e.,
+ // https://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will use virtual
+ // hosted bucket addressing when possible(https://BUCKET.s3.amazonaws.com/KEY).
+ UsePathStyle bool
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ return to
+}
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+ ctx = middleware.ClearStackValues(ctx)
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, metadata, err = handler.Handle(ctx, params)
+ if err != nil {
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+ return result, metadata, err
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveClientConfig(cfg, &opts)
+ return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+ if o.HTTPClient != nil {
+ return
+ }
+ o.HTTPClient = awshttp.NewBuildableClient()
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+ o.Retryer = retry.NewStandard()
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver())
+}
+
+func addClientUserAgent(stack *middleware.Stack) error {
+ return awsmiddleware.AddRequestUserAgentMiddleware(stack)
+}
+
+func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
+ mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: o.Credentials,
+ Signer: o.HTTPSignerV4,
+ LogSigning: o.ClientLogMode.IsSigning(),
+ })
+ return stack.Finalize.Add(mw, middleware.After)
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ so.DisableURIPathEscaping = true
+ })
+}
+
+func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
+ mo := retry.AddRetryMiddlewaresOptions{
+ Retryer: o.Retryer,
+ LogRetryAttempts: o.ClientLogMode.IsRetries(),
+ }
+ return retry.AddRetryMiddlewares(stack, mo)
+}
+
+// resolves client config
+func resolveClientConfig(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.UseARNRegion = value
+ }
+ return nil
+}
+
+func addMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+ return s3shared.AddMetadataRetrieverMiddleware(stack)
+}
+
+// nopGetBucketAccessor is no-op accessor for operation that don't support bucket
+// member as input
+func nopGetBucketAccessor(input interface{}) (*string, bool) {
+ return nil, false
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return s3shared.AddResponseErrorMiddleware(stack)
+}
+
+func disableAcceptEncodingGzip(stack *middleware.Stack) error {
+ return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{})
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError interface {
+ error
+
+ ServiceHostID() string
+ ServiceRequestID() string
+}
+
+var _ ResponseError = (*s3shared.ResponseError)(nil)
+
+// GetHostIDMetadata retrieves the host id from middleware metadata returns host id
+// as string along with a boolean indicating presence of hostId on middleware
+// metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+ return s3shared.GetHostIDMetadata(metadata)
+}
+
+// HTTPPresignerV4 represents presigner interface used by presign url client
+type HTTPPresignerV4 interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*v4.SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+ // ClientOptions are list of functional options to mutate client options used by
+ // the presign client.
+ ClientOptions []func(*Options)
+
+ // Presigner is the presigner used by the presign url client
+ Presigner HTTPPresignerV4
+
+ // Expires sets the expiration duration for the generated presign url. This should
+ // be the duration in seconds the presigned URL should be considered valid for. If
+ // not set or set to zero, presign url would default to expire after 900 seconds.
+ Expires time.Duration
+}
+
+func (o PresignOptions) copy() PresignOptions {
+ clientOptions := make([]func(*Options), len(o.ClientOptions))
+ copy(clientOptions, o.ClientOptions)
+ o.ClientOptions = clientOptions
+ return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOption as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+ return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+ o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// WithPresignExpires is a helper utility to append Expires value on presign
+// options optional function
+func WithPresignExpires(dur time.Duration) func(*PresignOptions) {
+ return withPresignExpires(dur).options
+}
+
+type withPresignExpires time.Duration
+
+func (w withPresignExpires) options(o *PresignOptions) {
+ o.Expires = time.Duration(w)
+}
+
+// PresignClient represents the presign url client
+type PresignClient struct {
+ client *Client
+ options PresignOptions
+}
+
+// NewPresignClient generates a presign client using provided API Client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+ var options PresignOptions
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ if len(options.ClientOptions) != 0 {
+ c = New(c.options, options.ClientOptions...)
+ }
+
+ if options.Presigner == nil {
+ options.Presigner = newDefaultV4Signer(c.options)
+ }
+
+ return &PresignClient{
+ client: c,
+ options: options,
+ }
+}
+
+func withNopHTTPClientAPIOption(o *Options) {
+ o.HTTPClient = smithyhttp.NopClient{}
+}
+
+type presignConverter PresignOptions
+
+func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+ stack.Finalize.Clear()
+ stack.Deserialize.Clear()
+ stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
+ pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ Presigner: c.Presigner,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ })
+ err = stack.Finalize.Add(pmw, middleware.After)
+ if err != nil {
+ return err
+ }
+ if c.Expires < 0 {
+ return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires)
+ }
+ // add middleware to set expiration for s3 presigned url, if expiration is set to
+ // 0, this middleware sets a default expiration of 900 seconds
+ err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = presignedurlcust.AddAsIsPresigingMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
new file mode 100644
index 000000000..10886dda6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
@@ -0,0 +1,224 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation aborts a multipart upload. After a multipart upload is aborted,
+// no additional parts can be uploaded using that upload ID. The storage consumed
+// by any previously uploaded parts will be freed. However, if any part uploads are
+// currently in progress, those part uploads might or might not succeed. As a
+// result, it might be necessary to abort a given multipart upload multiple times
+// in order to completely free all storage consumed by all parts. To verify that
+// all parts have been removed, so you don't get charged for the part storage, you
+// should call the ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) operation
+// and ensure that the parts list is empty. For information about permissions
+// required to use the multipart upload API, see Multipart Upload API and
+// Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The
+// following operations are related to AbortMultipartUpload:
+//
+// *
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) {
+ if params == nil {
+ params = &AbortMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, addOperationAbortMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AbortMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AbortMultipartUploadInput struct {
+
+ // The bucket name to which the upload was taking place. When using this API with
+ // an access point, you must direct requests to the access point hostname. The
+ // access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key of the object for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Upload ID that identifies the multipart upload.
+ //
+ // This member is required.
+ UploadId *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+}
+
+type AbortMultipartUploadOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "AbortMultipartUpload",
+ }
+}
+
+// getAbortMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*AbortMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getAbortMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
new file mode 100644
index 000000000..0ec9c0c0c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
@@ -0,0 +1,319 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Completes a multipart upload by assembling previously uploaded parts. You first
+// initiate the multipart upload and then upload all parts using the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation.
+// After successfully uploading all relevant parts of an upload, you call this
+// operation to complete the upload. Upon receiving this request, Amazon S3
+// concatenates all the parts in ascending order by part number to create a new
+// object. In the Complete Multipart Upload request, you must provide the parts
+// list. You must ensure that the parts list is complete. This operation
+// concatenates the parts that you provide in the list. For each part in the list,
+// you must provide the part number and the ETag value, returned after that part
+// was uploaded. Processing of a Complete Multipart Upload request could take
+// several minutes to complete. After Amazon S3 begins processing the request, it
+// sends an HTTP response header that specifies a 200 OK response. While processing
+// is in progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. Because a request could fail after the initial 200
+// OK response has been sent, it is important that you check the response body to
+// determine whether the request succeeded. Note that if CompleteMultipartUpload
+// fails, applications should be prepared to retry the failed requests. For more
+// information, see Amazon S3 Error Best Practices
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). For
+// more information about multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+// For information about permissions required to use the multipart upload API, see
+// Multipart Upload API and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// CompleteMultipartUpload has the following special errors:
+//
+// * Error code:
+// EntityTooSmall
+//
+// * Description: Your proposed upload is smaller than the minimum
+// allowed object size. Each part must be at least 5 MB in size, except the last
+// part.
+//
+// * 400 Bad Request
+//
+// * Error code: InvalidPart
+//
+// * Description: One or more
+// of the specified parts could not be found. The part might not have been
+// uploaded, or the specified entity tag might not have matched the part's entity
+// tag.
+//
+// * 400 Bad Request
+//
+// * Error code: InvalidPartOrder
+//
+// * Description: The list
+// of parts was not in ascending order. The parts list must be specified in order
+// by part number.
+//
+// * 400 Bad Request
+//
+// * Error code: NoSuchUpload
+//
+// * Description:
+// The specified multipart upload does not exist. The upload ID might be invalid,
+// or the multipart upload might have been aborted or completed.
+//
+// * 404 Not
+// Found
+//
+// The following operations are related to CompleteMultipartUpload:
+//
+// *
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) {
+ if params == nil {
+ params = &CompleteMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, addOperationCompleteMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CompleteMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CompleteMultipartUploadInput struct {
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // ID for the initiated multipart upload.
+ //
+ // This member is required.
+ UploadId *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The container for the multipart upload request information.
+ MultipartUpload *types.CompletedMultipartUpload
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+}
+
+type CompleteMultipartUploadOutput struct {
+
+ // The name of the bucket that contains the newly created object. When using this
+ // API with an access point, you must direct requests to the access point hostname.
+ // The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Bucket *string
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Entity tag that identifies the newly created object's data. Objects with
+ // different object data will have different entity tags. The entity tag is an
+ // opaque string. The entity tag may or may not be an MD5 digest of the object
+ // data. If the entity tag is not an MD5 digest of the object data, it will contain
+ // one or more nonhexadecimal characters and/or will consist of less than 32 or
+ // more than 32 hexadecimal digits.
+ ETag *string
+
+ // If the object expiration is configured, this will contain the expiration date
+ // (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string
+
+ // The object key of the newly created object.
+ Key *string
+
+ // The URI that identifies the newly created object.
+ Location *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // If you specified server-side encryption either with an Amazon S3-managed
+ // encryption key or an AWS KMS customer master key (CMK) in your initiate
+ // multipart upload request, the response includes this header. It confirms the
+ // encryption algorithm that Amazon S3 used to encrypt the object.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version ID of the newly created object, in case the bucket has versioning turned
+ // on.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CompleteMultipartUpload",
+ }
+}
+
+// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CompleteMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCompleteMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
new file mode 100644
index 000000000..033af456b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
@@ -0,0 +1,545 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Creates a copy of an object that is already stored in Amazon S3. You can store
+// individual objects of up to 5 TB in Amazon S3. You create a copy of your object
+// up to 5 GB in size in a single atomic operation using this API. However, to copy
+// an object greater than 5 GB, you must use the multipart upload Upload Part -
+// Copy API. For more information, see Copy Object Using the REST Multipart Upload
+// API
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
+// All copy requests must be authenticated. Additionally, you must have read access
+// to the source object and write access to the destination bucket. For more
+// information, see REST Authentication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both
+// the Region that you want to copy the object from and the Region that you want to
+// copy the object to must be enabled for your account. A copy request might return
+// an error when Amazon S3 receives the copy request or while Amazon S3 is copying
+// the files. If the error occurs before the copy operation starts, you receive a
+// standard Amazon S3 error. If the error occurs during the copy operation, the
+// error response is embedded in the 200 OK response. This means that a 200 OK
+// response can contain either a success or an error. Design your application to
+// parse the contents of the response and handle it appropriately. If the copy is
+// successful, you receive a response with information about the copied object. If
+// the request is an HTTP 1.1 request, the response is chunk encoded. If it were
+// not, it would not contain the content-length, and you would need to read the
+// entire body. The copy request charge is based on the storage class and Region
+// that you specify for the destination object. For pricing information, see Amazon
+// S3 pricing (https://aws.amazon.com/s3/pricing/). Amazon S3 transfer acceleration
+// does not support cross-Region copies. If you request a cross-Region copy using a
+// transfer acceleration endpoint, you get a 400 Bad Request error. For more
+// information, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+// Metadata When copying an object, you can preserve all metadata (default) or
+// specify new metadata. However, the ACL is not preserved and is set to private
+// for the user making the request. To override the default ACL setting, specify a
+// new ACL when generating a copy request. For more information, see Using ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To
+// specify whether you want the object metadata copied from the source object or
+// replaced with metadata provided in the request, you can optionally add the
+// x-amz-metadata-directive header. When you grant permissions, you can use the
+// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior
+// when objects are uploaded. For more information, see Specifying Conditions in a
+// Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in
+// the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific
+// condition keys, see Actions, Resources, and Condition Keys for Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
+// x-amz-copy-source-if Headers To only copy an object under certain conditions,
+// such as whether the Etag matches or whether the object was modified before or
+// after a specified date, use the following request parameters:
+//
+// *
+// x-amz-copy-source-if-match
+//
+// * x-amz-copy-source-if-none-match
+//
+// *
+// x-amz-copy-source-if-unmodified-since
+//
+// * x-amz-copy-source-if-modified-since
+//
+// If
+// both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// 200 OK and copies the data:
+//
+// * x-amz-copy-source-if-match condition evaluates to
+// true
+//
+// * x-amz-copy-source-if-unmodified-since condition evaluates to false
+//
+// If
+// both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// the 412 Precondition Failed response code:
+//
+// * x-amz-copy-source-if-none-match
+// condition evaluates to false
+//
+// * x-amz-copy-source-if-modified-since condition
+// evaluates to true
+//
+// All headers with the x-amz- prefix, including
+// x-amz-copy-source, must be signed. Server-side encryption When you perform a
+// CopyObject operation, you can optionally use the appropriate encryption-related
+// headers to encrypt the object using server-side encryption with AWS managed
+// encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With
+// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
+// its data centers and decrypts the data when you access it. For more information
+// about server-side encryption, see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). If
+// a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.
+// For more information, see Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+// Simple Storage Service Developer Guide. Access Control List (ACL)-Specific
+// Request Headers When copying an object, you can optionally use headers to grant
+// ACL-based permissions. By default, all objects are private. Only the owner has
+// full access control. When adding a new object, you can grant permissions to
+// individual AWS accounts or to predefined groups defined by Amazon S3. These
+// permissions are then added to the ACL on the object. For more information, see
+// Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing
+// ACLs Using the REST API
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+// Storage Class Options You can use the CopyObject operation to change the storage
+// class of an object that is already stored in Amazon S3 using the StorageClass
+// parameter. For more information, see Storage Classes
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+// the Amazon S3 Service Developer Guide. Versioning By default, x-amz-copy-source
+// identifies the current version of an object to copy. If the current version is a
+// delete marker, Amazon S3 behaves as if the object was deleted. To copy a
+// different version, use the versionId subresource. If you enable versioning on
+// the target bucket, Amazon S3 generates a unique version ID for the object being
+// copied. This version ID is different from the version ID of the source object.
+// Amazon S3 returns the version ID of the copied object in the x-amz-version-id
+// response header in the response. If you do not enable versioning or suspend it
+// on the target bucket, the version ID that Amazon S3 generates is always null. If
+// the source object's storage class is GLACIER, you must restore a copy of this
+// object before you can use it as a source object for the copy operation. For more
+// information, see RestoreObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). The
+// following operations are related to CopyObject:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// For more
+// information, see Copying Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) {
+ if params == nil {
+ params = &CopyObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, addOperationCopyObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CopyObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CopyObjectInput struct {
+
+ // The name of the destination bucket. When using this API with an access point,
+ // you must direct requests to the access point hostname. The access point hostname
+ // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the source object for the copy operation. You specify the value in one
+ // of two formats, depending on whether you want to access the source object
+ // through an access point
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
+ //
+ // * For
+ // objects not accessed through an access point, specify the name of the source
+ // bucket and the key of the source object, separated by a slash (/). For example,
+ // to copy the object reports/january.pdf from the bucket awsexamplebucket, use
+ // awsexamplebucket/reports/january.pdf. The value must be URL encoded.
+ //
+ // * For
+ // objects accessed through access points, specify the Amazon Resource Name (ARN)
+ // of the object as accessed through the access point, in the format
+ // arn:aws:s3:::accesspoint//object/. For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2, use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using access
+ // points only when the source and destination buckets are in the same AWS Region.
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012 in
+ // Region us-west-2, use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL encoded.
+ //
+ // To copy a specific version of an object, append
+ // ?versionId= to the value (for example,
+ // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of the
+ // source object.
+ //
+ // This member is required.
+ CopySource *string
+
+ // The key of the destination object.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. This action is not supported by Amazon S3
+ // on Outposts.
+ ACL types.ObjectCannedACL
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
+ // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
+ // Specifying this header with a COPY operation doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account id of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request will fail with an HTTP 403
+ // (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The account id of the expected source bucket owner. If the source bucket is
+ // owned by a different account, the request will fail with an HTTP 403 (Access
+ // Denied) error.
+ ExpectedSourceBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
+ // action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL. This action is not supported by Amazon S3
+ // on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether the metadata is copied from the source object or replaced with
+ // metadata provided in the request.
+ MetadataDirective types.MetadataDirective
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that you want to apply to the copied object.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when you want the copied object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The value
+ // of this header is a base64-encoded UTF-8 string holding JSON with the encryption
+ // context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL or
+ // using SigV4. For information about configuring using any of the officially
+ // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request
+ // Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
+ // more information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+ // the Amazon S3 Service Developer Guide.
+ StorageClass types.StorageClass
+
+ // The tag-set for the object destination object this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query
+ // parameters.
+ Tagging *string
+
+ // Specifies whether the object tag-set are copied from the source object or
+ // replaced with tag-set provided in the request.
+ TaggingDirective types.TaggingDirective
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ WebsiteRedirectLocation *string
+}
+
+type CopyObjectOutput struct {
+
+ // Indicates whether the copied object uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Container for all response elements.
+ CopyObjectResult *types.CopyObjectResult
+
+ // Version of the copied object in the destination bucket.
+ CopySourceVersionId *string
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the AWS KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version ID of the newly created copy.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCopyObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CopyObject",
+ }
+}
+
+// getCopyObjectBucketMember returns a pointer to string denoting a provided bucket
+// member valueand a boolean indicating if the input has a modeled bucket name,
+func getCopyObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CopyObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCopyObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
new file mode 100644
index 000000000..0acb6492e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
@@ -0,0 +1,268 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
+// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
+// are never allowed to create buckets. By creating the bucket, you become the
+// bucket owner. Not every string is an acceptable bucket name. For information
+// about bucket naming restrictions, see Working with Amazon S3 buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). If you want
+// to create an Amazon S3 on Outposts bucket, see Create Bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
+// By default, the bucket is created in the US East (N. Virginia) Region. You can
+// optionally specify a Region in the request body. You might choose a Region to
+// optimize latency, minimize costs, or address regulatory requirements. For
+// example, if you reside in Europe, you will probably find it advantageous to
+// create buckets in the Europe (Ireland) Region. For more information, see
+// Accessing a bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+// If you send your create bucket request to the s3.amazonaws.com endpoint, the
+// request goes to the us-east-1 Region. Accordingly, the signature calculations in
+// Signature Version 4 must use us-east-1 as the Region, even if the location
+// constraint in the request specifies another Region where the bucket is to be
+// created. If you create a bucket in a Region other than US East (N. Virginia),
+// your application must be able to handle 307 redirect. For more information, see
+// Virtual hosting of buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). When
+// creating a bucket using this operation, you can optionally specify the accounts
+// or groups that should be granted specific permissions on the bucket. There are
+// two ways to grant the appropriate permissions using the request headers.
+//
+// *
+// Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a
+// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
+// set of grantees and permissions. For more information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly using the x-amz-grant-read,
+// x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and
+// x-amz-grant-full-control headers. These headers map to the set of permissions
+// Amazon S3 supports in an ACL. For more information, see Access control list
+// (ACL) overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify
+// each grantee as a type=value pair, where the type is one of the following:
+//
+// * id
+// – if the value specified is the canonical user ID of an AWS account
+//
+// * uri – if
+// you are granting permissions to a predefined group
+//
+// * emailAddress – if the
+// value specified is the email address of an AWS account Using email addresses to
+// specify a grantee is only supported in the following AWS Regions:
+//
+// * US East (N.
+// Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific
+// (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe
+// (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3
+// supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// For example, the following x-amz-grant-read header grants
+// the AWS accounts identified by account IDs permissions to read object data and
+// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+//
+// You can use
+// either a canned ACL or specify access permissions explicitly. You cannot do
+// both. The following operations are related to CreateBucket:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) {
+ if params == nil {
+ params = &CreateBucketInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, addOperationCreateBucketMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateBucketOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateBucketInput struct {
+
+ // The name of the bucket to create.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The canned ACL to apply to the bucket.
+ ACL types.BucketCannedACL
+
+ // The configuration information for the bucket.
+ CreateBucketConfiguration *types.CreateBucketConfiguration
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string
+
+ // Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+ ObjectLockEnabledForBucket bool
+}
+
+type CreateBucketOutput struct {
+
+ // Specifies the Region where the bucket will be created. If you are creating a
+ // bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
+ // specify the location.
+ Location *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCreateBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCreateBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CreateBucket",
+ }
+}
+
+// getCreateBucketBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getCreateBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
new file mode 100644
index 000000000..d39625e0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
@@ -0,0 +1,575 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// This operation initiates a multipart upload and returns an upload ID. This
+// upload ID is used to associate all of the parts in the specific multipart
+// upload. You specify this upload ID in each of your subsequent upload part
+// requests (see UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also
+// include this upload ID in the final request to either complete or abort the
+// multipart upload request. For more information about multipart uploads, see
+// Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). If you have
+// configured a lifecycle rule to abort incomplete multipart uploads, the upload
+// must complete within the number of days specified in the bucket lifecycle
+// configuration. Otherwise, the incomplete multipart upload becomes eligible for
+// an abort operation and Amazon S3 aborts the multipart upload. For more
+// information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle
+// Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+// For information about the permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). For
+// request signing, multipart upload is just a series of regular requests. You
+// initiate a multipart upload, send one or more requests to upload parts, and then
+// complete the multipart upload process. You sign each request individually. There
+// is nothing special about signing multipart upload requests. For more information
+// about signing, see Authenticating Requests (AWS Signature Version 4)
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or abort
+// the multipart upload. Amazon S3 frees up the space used to store the parts and
+// stop charging you for storing them only after you either complete or abort a
+// multipart upload. You can optionally request server-side encryption. For
+// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
+// its data centers and decrypts it when you access it. You can provide your own
+// encryption key, or use AWS Key Management Service (AWS KMS) customer master keys
+// (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own
+// encryption key, the request headers you provide in UploadPart and UploadPartCopy
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload. To perform a multipart upload with encryption
+// using an AWS KMS CMK, the requester must have permission to the kms:Encrypt,
+// kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions
+// on the key. These permissions are required because Amazon S3 must decrypt and
+// read data from the encrypted file parts before it completes the multipart
+// upload. If your AWS Identity and Access Management (IAM) user or role is in the
+// same AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than the
+// key, then you must have the permissions on both the key policy and your IAM user
+// or role. For more information, see Protecting Data Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+// Access Permissions When copying an object, you can optionally specify the
+// accounts or groups that should be granted specific permissions on the new
+// object. There are two ways to grant the permissions using the request
+// headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more
+// information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly with the x-amz-grant-read,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. These parameters map to the set of permissions that Amazon S3 supports
+// in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can
+// use either a canned ACL or specify access permissions explicitly. You cannot do
+// both. Server-Side- Encryption-Specific Request Headers You can optionally tell
+// Amazon S3 to encrypt data at rest using server-side encryption. Server-side
+// encryption is for data encryption at rest. Amazon S3 encrypts your data as it
+// writes it to disks in its data centers and decrypts it when you access it. The
+// option you use depends on whether you want to use AWS managed encryption keys or
+// provide your own encryption key.
+//
+// * Use encryption keys managed by Amazon S3 or
+// customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If
+// you want AWS to manage the keys used to encrypt data, specify the following
+// headers in the request.
+//
+// * x-amz-server-side-encryption
+//
+// *
+// x-amz-server-side-encryption-aws-kms-key-id
+//
+// *
+// x-amz-server-side-encryption-context
+//
+// If you specify
+// x-amz-server-side-encryption:aws:kms, but don't provide
+// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK
+// in AWS KMS to protect the data. All GET and PUT requests for an object protected
+// by AWS KMS fail if you don't make them with SSL or by using SigV4. For more
+// information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS),
+// see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// *
+// Use customer-provided encryption keys – If you want to manage your own
+// encryption keys, provide all the following headers in the request.
+//
+// *
+// x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about
+// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting
+// Data Using Server-Side Encryption with CMKs stored in AWS KMS
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// Access-Control-List
+// (ACL)-Specific Request Headers You also can use the following access
+// control–related headers with this operation. By default, all objects are
+// private. Only the owner has full access control. When adding a new object, you
+// can grant permissions to individual AWS accounts or to predefined groups defined
+// by Amazon S3. These permissions are then added to the access control list (ACL)
+// on the object. For more information, see Using ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With
+// this operation, you can grant access permissions using one of the following two
+// methods:
+//
+// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of
+// predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of
+// grantees and permissions. For more information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly — To explicitly grant access permissions
+// to specific AWS accounts or groups, use the following headers. Each header maps
+// to specific permissions that Amazon S3 supports in an ACL. For more information,
+// see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the
+// header, you specify a list of grantees who get the specific permission. To grant
+// permissions explicitly, use:
+//
+// * x-amz-grant-read
+//
+// * x-amz-grant-write
+//
+// *
+// x-amz-grant-read-acp
+//
+// * x-amz-grant-write-acp
+//
+// * x-amz-grant-full-control
+//
+// You
+// specify each grantee as a type=value pair, where the type is one of the
+// following:
+//
+// * id – if the value specified is the canonical user ID of an AWS
+// account
+//
+// * uri – if you are granting permissions to a predefined group
+//
+// *
+// emailAddress – if the value specified is the email address of an AWS account
+// Using email addresses to specify a grantee is only supported in the following
+// AWS Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West (N. California)
+//
+// * US West
+// (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific
+// (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the
+// Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// For example, the following x-amz-grant-read header grants
+// the AWS accounts identified by account IDs permissions to read object data and
+// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+//
+// The
+// following operations are related to CreateMultipartUpload:
+//
+// * UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) {
+ if params == nil {
+ params = &CreateMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, addOperationCreateMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateMultipartUploadInput struct {
+
+ // The name of the bucket to which to initiate the upload When using this API with
+ // an access point, you must direct requests to the access point hostname. The
+ // access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload is to be initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. This action is not supported by Amazon S3
+ // on Outposts.
+ ACL types.ObjectCannedACL
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
+ // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
+ // Specifying this header with an object operation doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
+ // action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL. This action is not supported by Amazon S3
+ // on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether you want to apply a Legal Hold to the uploaded object.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
+ ObjectLockMode types.ObjectLockMode
+
+ // Specifies the date and time when you want the Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The value
+ // of this header is a base64-encoded UTF-8 string holding JSON with the encryption
+ // context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object
+ // encryption. All GET and PUT requests for an object protected by AWS KMS will
+ // fail if not made via SSL or using SigV4. For information about configuring using
+ // any of the officially supported AWS SDKs and AWS CLI, see Specifying the
+ // Signature Version in Request Authentication
+ // (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
+ // more information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+ // the Amazon S3 Service Developer Guide.
+ StorageClass types.StorageClass
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ Tagging *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ WebsiteRedirectLocation *string
+}
+
+type CreateMultipartUploadOutput struct {
+
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object name
+ // in the request, the response includes this header. The header indicates when the
+ // initiated multipart upload becomes eligible for an abort operation. For more
+ // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle
+ // Policy
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ // The response also includes the x-amz-abort-rule-id header that provides the ID
+ // of the lifecycle configuration rule that defines this action.
+ AbortDate *time.Time
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ AbortRuleId *string
+
+ // The name of the bucket to which the multipart upload was initiated. When using
+ // this API with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Bucket *string
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Object key for which the multipart upload was initiated.
+ Key *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the AWS KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // ID for the initiated multipart upload.
+ UploadId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CreateMultipartUpload",
+ }
+}
+
+// getCreateMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
new file mode 100644
index 000000000..cd1eee14c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
@@ -0,0 +1,160 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the S3 bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) {
+ if params == nil {
+ params = &DeleteBucketInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, addOperationDeleteBucketMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketInput struct {
+
+ // Specifies the bucket being deleted.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucket",
+ }
+}
+
+// getDeleteBucketBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getDeleteBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..77260700a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
@@ -0,0 +1,180 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID). To use this operation, you must have permissions to perform
+// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about the Amazon S3 analytics feature, see Amazon S3 Analytics –
+// Storage Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+// The following operations are related to DeleteBucketAnalyticsConfiguration:
+//
+// *
+// GetBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// *
+// ListBucketAnalyticsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// *
+// PutBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, addOperationDeleteBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketAnalyticsConfigurationInput struct {
+
+ // The name of the bucket from which an analytics configuration is deleted.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketAnalyticsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketAnalyticsConfiguration",
+ }
+}
+
+// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
new file mode 100644
index 000000000..49eea49b2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
@@ -0,0 +1,163 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the cors configuration information set for the bucket. To use this
+// operation, you must have permission to perform the s3:PutBucketCORS action. The
+// bucket owner has this permission by default and can grant this permission to
+// others. For information about cors, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple
+// Storage Service Developer Guide. Related Resources:
+//
+// * PutBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// *
+// RESTOPTIONSobject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) {
+ if params == nil {
+ params = &DeleteBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, addOperationDeleteBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketCorsInput struct {
+
+ // Specifies the bucket whose cors configuration is being deleted.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketCorsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketCors",
+ }
+}
+
+// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
new file mode 100644
index 000000000..45dc15fab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
@@ -0,0 +1,172 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the DELETE operation removes default encryption from the
+// bucket. For information about the Amazon S3 default encryption feature, see
+// Amazon S3 Default Bucket Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the
+// Amazon Simple Storage Service Developer Guide. To use this operation, you must
+// have permissions to perform the s3:PutEncryptionConfiguration action. The bucket
+// owner has this permission by default. The bucket owner can grant this permission
+// to others. For more information about permissions, see Permissions Related to
+// Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. Related Resources
+//
+// *
+// PutBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
+//
+// *
+// GetBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
+func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &DeleteBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, addOperationDeleteBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketEncryptionInput struct {
+
+ // The name of the bucket containing the server-side encryption configuration to
+ // delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketEncryptionOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketEncryption",
+ }
+}
+
+// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..167f0fa57
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,179 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The
+// S3 Intelligent-Tiering storage class is designed to optimize storage costs by
+// automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead. S3 Intelligent-Tiering delivers
+// automatic cost savings by moving data between access tiers, when access patterns
+// change. The S3 Intelligent-Tiering storage class is suitable for objects larger
+// than 128 KB that you plan to store for at least 30 days. If the size of an
+// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
+// can be stored, but they are always charged at the frequent access tier rates in
+// the S3 Intelligent-Tiering storage class. If you delete an object before the end
+// of the 30-day minimum storage duration period, you are charged for 30 days. For
+// more information, see Storage class for automatically optimizing frequently and
+// infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+// Operations related to DeleteBucketIntelligentTieringConfiguration include:
+//
+// *
+// GetBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+//
+// *
+// PutBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
+//
+// *
+// ListBucketIntelligentTieringConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketIntelligentTieringConfigurationInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+}
+
+type DeleteBucketIntelligentTieringConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member valueand a boolean indicating if the
+// input has a modeled bucket name,
+func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
new file mode 100644
index 000000000..0eec2938d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
@@ -0,0 +1,179 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes an inventory configuration (identified by the inventory ID) from the
+// bucket. To use this operation, you must have permissions to perform the
+// s3:PutInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
+// Operations related to DeleteBucketInventoryConfiguration include:
+//
+// *
+// GetBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// *
+// PutBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+//
+// *
+// ListBucketInventoryConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, addOperationDeleteBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket containing the inventory configuration to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketInventoryConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketInventoryConfiguration",
+ }
+}
+
+// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
new file mode 100644
index 000000000..d68b5197f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
@@ -0,0 +1,169 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes
+// all the lifecycle configuration rules in the lifecycle subresource associated
+// with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the deleted
+// lifecycle configuration. To use this operation, you must have permission to
+// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner
+// has this permission and the bucket owner can grant this permission to others.
+// There is usually some time lag before lifecycle configuration deletion is fully
+// propagated to all the Amazon S3 systems. For more information about the object
+// expiration, see Elements to Describe Lifecycle Actions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
+// Related actions include:
+//
+// * PutBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// *
+// GetBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) {
+ if params == nil {
+ params = &DeleteBucketLifecycleInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, addOperationDeleteBucketLifecycleMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketLifecycleOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketLifecycleInput struct {
+
+ // The bucket name of the lifecycle to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketLifecycleOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketLifecycle",
+ }
+}
+
+// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketLifecycleInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketLifecycleBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
new file mode 100644
index 000000000..14e7bc4cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
@@ -0,0 +1,185 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes a metrics configuration for the Amazon CloudWatch request metrics
+// (specified by the metrics configuration ID) from the bucket. Note that this
+// doesn't include the daily storage metrics. To use this operation, you must have
+// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner
+// has this permission by default. The bucket owner can grant this permission to
+// others. For more information about permissions, see Permissions Related to
+// Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+// The following operations are related to DeleteBucketMetricsConfiguration:
+//
+// *
+// GetBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// *
+// PutBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// *
+// ListBucketMetricsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// *
+// Monitoring Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
+func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, addOperationDeleteBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket containing the metrics configuration to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketMetricsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketMetricsConfiguration",
+ }
+}
+
+// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
new file mode 100644
index 000000000..1522ab63c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
@@ -0,0 +1,162 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:PutBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see Specifying Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// For information about Amazon S3 Object Ownership, see Using Object Ownership
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html).
+// The following operations are related to DeleteBucketOwnershipControls:
+//
+// *
+// GetBucketOwnershipControls
+//
+// * PutBucketOwnershipControls
+func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &DeleteBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, addOperationDeleteBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketOwnershipControlsInput struct {
+
+ // The Amazon S3 bucket whose OwnershipControls you want to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketOwnershipControlsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketOwnershipControls",
+ }
+}
+
+// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
new file mode 100644
index 000000000..40f58449f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
@@ -0,0 +1,171 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the DELETE operation uses the policy subresource to
+// delete the policy of a specified bucket. If you are using an identity other than
+// the root user of the AWS account that owns the bucket, the calling identity must
+// have the DeleteBucketPolicy permissions on the specified bucket and belong to
+// the bucket owner's account to use this operation. If you don't have
+// DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If
+// you have the correct permissions, but you're not using an identity that belongs
+// to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action. For more information about bucket
+// policies, see Using Bucket Policies and UserPolicies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The
+// following operations are related to DeleteBucketPolicy
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) {
+ if params == nil {
+ params = &DeleteBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, addOperationDeleteBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketPolicyInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketPolicyOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketPolicy",
+ }
+}
+
+// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
new file mode 100644
index 000000000..805db12a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
@@ -0,0 +1,170 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the replication configuration from the bucket. To use this operation,
+// you must have permissions to perform the s3:PutReplicationConfiguration action.
+// The bucket owner has these permissions by default and can grant it to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). It can
+// take a while for the deletion of a replication configuration to fully propagate.
+// For information about replication configuration, see Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+// S3 Developer Guide. The following operations are related to
+// DeleteBucketReplication:
+//
+// * PutBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// *
+// GetBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, addOperationDeleteBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketReplicationInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketReplicationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketReplication",
+ }
+}
+
+// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
new file mode 100644
index 000000000..fbb8ece17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
@@ -0,0 +1,161 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the tags from the bucket. To use this operation, you must have
+// permission to perform the s3:PutBucketTagging action. By default, the bucket
+// owner has this permission and can grant this permission to others. The following
+// operations are related to DeleteBucketTagging:
+//
+// * GetBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// *
+// PutBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
+func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) {
+ if params == nil {
+ params = &DeleteBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, addOperationDeleteBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketTaggingInput struct {
+
+ // The bucket that has the tag set to be removed.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketTaggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketTagging",
+ }
+}
+
+// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
new file mode 100644
index 000000000..eafabc3b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
@@ -0,0 +1,170 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation removes the website configuration for a bucket. Amazon S3 returns
+// a 200 OK response upon successfully deleting a website configuration on the
+// specified bucket. You will get a 200 OK response if the website configuration
+// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
+// response if the bucket specified in the request does not exist. This DELETE
+// operation requires the S3:DeleteBucketWebsite permission. By default, only the
+// bucket owner can delete the website configuration attached to a bucket. However,
+// bucket owners can grant other users permission to delete the website
+// configuration by writing a bucket policy granting them the
+// S3:DeleteBucketWebsite permission. For more information about hosting websites,
+// see Hosting Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). The
+// following operations are related to DeleteBucketWebsite:
+//
+// * GetBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html)
+//
+// *
+// PutBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &DeleteBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, addOperationDeleteBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketWebsiteInput struct {
+
+ // The bucket name for which you want to remove the website configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeleteBucketWebsiteOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteBucketWebsite",
+ }
+}
+
+// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
new file mode 100644
index 000000000..aae9e85f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
@@ -0,0 +1,228 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a null
+// version, Amazon S3 does not remove any objects. To remove a specific version,
+// you must be the bucket owner and you must use the version Id subresource. Using
+// this subresource permanently deletes the version. If the object deleted is a
+// delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.
+// If the object you want to delete is in a bucket where the bucket versioning
+// configuration is MFA Delete enabled, you must include the x-amz-mfa request
+// header in the DELETE versionId request. Requests that include x-amz-mfa must use
+// HTTPS. For more information about MFA Delete, see Using MFA Delete
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see
+// sample requests that use versioning, see Sample Request
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+// You can delete objects by explicitly calling the DELETE Object API or configure
+// its lifecycle (PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// to enable Amazon S3 to remove them for you. If you want to block users or
+// accounts from removing or deleting objects from your bucket, you must deny them
+// the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
+// actions. The following operation is related to DeleteObject:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) {
+ if params == nil {
+ params = &DeleteObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, addOperationDeleteObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteObjectInput struct {
+
+ // The bucket name of the bucket containing the object. When using this API with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key name of the object to delete.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to
+ // process this operation.
+ BypassGovernanceRetention bool
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The concatenation of the authentication device's serial number, a space, and the
+ // value that is displayed on your authentication device. Required to permanently
+ // delete a versioned object if versioning is configured with MFA delete enabled.
+ MFA *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type DeleteObjectOutput struct {
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker bool
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteObject",
+ }
+}
+
+// getDeleteObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getDeleteObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
new file mode 100644
index 000000000..4d86e9550
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
@@ -0,0 +1,193 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the entire tag set from the specified object. For more information about
+// managing object tags, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). To use
+// this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action. To delete tags of a specific object version, add the versionId query
+// parameter in the request. You will need permission for the
+// s3:DeleteObjectVersionTagging action. The following operations are related to
+// DeleteObjectTagging:
+//
+// * PutObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// *
+// GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) {
+ if params == nil {
+ params = &DeleteObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, addOperationDeleteObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteObjectTaggingInput struct {
+
+ // The bucket name containing the objects from which to remove the tags. When using
+ // this API with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Name of the object key.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string
+}
+
+type DeleteObjectTaggingOutput struct {
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteObjectTagging",
+ }
+}
+
+// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
new file mode 100644
index 000000000..20f85c0c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
@@ -0,0 +1,251 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation enables you to delete multiple objects from a bucket using a
+// single HTTP request. If you know the object keys that you want to delete, then
+// this operation provides a suitable alternative to sending individual delete
+// requests, reducing per-request overhead. The request contains a list of up to
+// 1000 keys that you want to delete. In the XML, you provide the object key names,
+// and optionally, version IDs if you want to delete a specific version of the
+// object from a versioning-enabled bucket. For each key, Amazon S3 performs a
+// delete operation and returns the result of that delete, success, or failure, in
+// the response. Note that if the object specified in the request is not found,
+// Amazon S3 returns the result as deleted. The operation supports two modes for
+// the response: verbose and quiet. By default, the operation uses verbose mode in
+// which the response includes the result of deletion of each key in your request.
+// In quiet mode the response includes only keys where the delete operation
+// encountered an error. For a successful deletion, the operation does not return
+// any information about the delete in the response body. When performing this
+// operation on an MFA Delete enabled bucket, that attempts to delete any versioned
+// objects, you must include an MFA token. If you do not provide one, the entire
+// request will fail, even if there are non-versioned objects you are trying to
+// delete. If you provide an invalid token, whether there are versioned keys in the
+// request or not, the entire Multi-Object Delete request will fail. For
+// information about MFA Delete, see MFA Delete
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+// Finally, the Content-MD5 header is required for all Multi-Object Delete
+// requests. Amazon S3 uses the header value to ensure that your request body has
+// not been altered in transit. The following operations are related to
+// DeleteObjects:
+//
+// * CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) {
+ if params == nil {
+ params = &DeleteObjectsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, addOperationDeleteObjectsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteObjectsInput struct {
+
+ // The bucket name containing the objects to delete. When using this API with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the request.
+ //
+ // This member is required.
+ Delete *types.Delete
+
+ // Specifies whether you want to delete this object even if it has a
+ // Governance-type Object Lock in place. You must have sufficient permissions to
+ // perform this operation.
+ BypassGovernanceRetention bool
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The concatenation of the authentication device's serial number, a space, and the
+ // value that is displayed on your authentication device. Required to permanently
+ // delete a versioned object if versioning is configured with MFA delete enabled.
+ MFA *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+// in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+}
+
+type DeleteObjectsOutput struct {
+
+ // Container element for a successful delete. It identifies the object that was
+ // successfully deleted.
+ Deleted []types.DeletedObject
+
+ // Container for a failed delete operation that describes the object that Amazon S3
+ // attempted to delete and the error it encountered.
+ Errors []types.Error
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeleteObjects",
+ }
+}
+
+// getDeleteObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getDeleteObjectsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
new file mode 100644
index 000000000..c882c384a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
@@ -0,0 +1,174 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this
+// operation, you must have the s3:PutBucketPublicAccessBlock permission. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The
+// following operations are related to DeletePublicAccessBlock:
+//
+// * Using Amazon S3
+// Block Public Access
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// *
+// GetPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// *
+// PutPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// *
+// GetBucketPolicyStatus
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) {
+ if params == nil {
+ params = &DeletePublicAccessBlockInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, addOperationDeletePublicAccessBlockMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeletePublicAccessBlockOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeletePublicAccessBlockInput struct {
+
+ // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type DeletePublicAccessBlockOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeletePublicAccessBlock",
+ }
+}
+
+// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeletePublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeletePublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..bc6fc6a38
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
@@ -0,0 +1,180 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the GET operation uses the accelerate subresource to
+// return the Transfer Acceleration state of a bucket, which is either Enabled or
+// Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3. To use this
+// operation, you must have permission to perform the s3:GetAccelerateConfiguration
+// action. The bucket owner has this permission by default. The bucket owner can
+// grant this permission to others. For more information about permissions, see
+// Permissions Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. You set the Transfer Acceleration
+// state of an existing bucket to Enabled or Suspended by using the
+// PutBucketAccelerateConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
+// operation. A GET accelerate request does not return a state value for a bucket
+// that has no transfer acceleration state. A bucket has no Transfer Acceleration
+// state if a state has never been set on the bucket. For more information about
+// transfer acceleration, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in
+// the Amazon Simple Storage Service Developer Guide. Related Resources
+//
+// *
+// PutBucketAccelerateConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
+func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketAccelerateConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, addOperationGetBucketAccelerateConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAccelerateConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAccelerateConfigurationInput struct {
+
+ // The name of the bucket for which the accelerate configuration is retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketAccelerateConfigurationOutput struct {
+
+ // The accelerate configuration of the bucket.
+ Status types.BucketAccelerateStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAccelerateConfiguration",
+ }
+}
+
+// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAccelerateConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
new file mode 100644
index 000000000..034cdd279
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
@@ -0,0 +1,166 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the GET operation uses the acl subresource to return the
+// access control list (ACL) of a bucket. To use GET to return the ACL of the
+// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is
+// granted to the anonymous user, you can return the ACL of the bucket without
+// using an authorization header. Related Resources
+//
+// * ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
+func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) {
+ if params == nil {
+ params = &GetBucketAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, addOperationGetBucketAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAclInput struct {
+
+ // Specifies the S3 bucket whose ACL is being requested.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketAclOutput struct {
+
+ // A list of grants.
+ Grants []types.Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *types.Owner
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAcl",
+ }
+}
+
+// getGetBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetBucketAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..ee01383b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
@@ -0,0 +1,185 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the GET operation returns an analytics configuration
+// (identified by the analytics configuration ID) from the bucket. To use this
+// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner can
+// grant this permission to others. For more information about permissions, see
+// Permissions Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. For information about Amazon S3
+// analytics feature, see Amazon S3 Analytics – Storage Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
+// in the Amazon Simple Storage Service Developer Guide. Related Resources
+//
+// *
+// DeleteBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// *
+// ListBucketAnalyticsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// *
+// PutBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, addOperationGetBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAnalyticsConfigurationInput struct {
+
+ // The name of the bucket from which an analytics configuration is retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketAnalyticsConfigurationOutput struct {
+
+ // The configuration and any analyses for the analytics filter.
+ AnalyticsConfiguration *types.AnalyticsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAnalyticsConfiguration",
+ }
+}
+
+// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
new file mode 100644
index 000000000..ea1d44eb2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
@@ -0,0 +1,169 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the cors configuration information set for the bucket. To use this
+// operation, you must have permission to perform the s3:GetBucketCORS action. By
+// default, the bucket owner has this permission and can grant it to others. For
+// more information about cors, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following
+// operations are related to GetBucketCors:
+//
+// * PutBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// *
+// DeleteBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) {
+ if params == nil {
+ params = &GetBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, addOperationGetBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketCorsInput struct {
+
+ // The bucket name for which to get the cors configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketCorsOutput struct {
+
+ // A set of origins and methods (cross-origin access that you want to allow). You
+ // can add up to 100 rules to the configuration.
+ CORSRules []types.CORSRule
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketCors",
+ }
+}
+
+// getGetBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getGetBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
new file mode 100644
index 000000000..8c8202010
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
@@ -0,0 +1,176 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the default encryption configuration for an Amazon S3 bucket. For
+// information about the Amazon S3 default encryption feature, see Amazon S3
+// Default Bucket Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). To use
+// this operation, you must have permission to perform the
+// s3:GetEncryptionConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The
+// following operations are related to GetBucketEncryption:
+//
+// * PutBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
+//
+// *
+// DeleteBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
+func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &GetBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, addOperationGetBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketEncryptionInput struct {
+
+ // The name of the bucket from which the server-side encryption configuration is
+ // retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketEncryptionOutput struct {
+
+ // Specifies the default server-side-encryption configuration.
+ ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketEncryption",
+ }
+}
+
+// getGetBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..22cd6debe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,184 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3
+// Intelligent-Tiering storage class is designed to optimize storage costs by
+// automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead. S3 Intelligent-Tiering delivers
+// automatic cost savings by moving data between access tiers, when access patterns
+// change. The S3 Intelligent-Tiering storage class is suitable for objects larger
+// than 128 KB that you plan to store for at least 30 days. If the size of an
+// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
+// can be stored, but they are always charged at the frequent access tier rates in
+// the S3 Intelligent-Tiering storage class. If you delete an object before the end
+// of the 30-day minimum storage duration period, you are charged for 30 days. For
+// more information, see Storage class for automatically optimizing frequently and
+// infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+// Operations related to GetBucketIntelligentTieringConfiguration include:
+//
+// *
+// DeleteBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+//
+// *
+// PutBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
+//
+// *
+// ListBucketIntelligentTieringConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, addOperationGetBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketIntelligentTieringConfigurationInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+}
+
+type GetBucketIntelligentTieringConfigurationOutput struct {
+
+ // Container for S3 Intelligent-Tiering configuration.
+ IntelligentTieringConfiguration *types.IntelligentTieringConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member valueand a boolean indicating if the
+// input has a modeled bucket name,
+func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
new file mode 100644
index 000000000..861046790
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
@@ -0,0 +1,183 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns an inventory configuration (identified by the inventory configuration
+// ID) from the bucket. To use this operation, you must have permissions to perform
+// the s3:GetInventoryConfiguration action. The bucket owner has this permission by
+// default and can grant this permission to others. For more information about
+// permissions, see Permissions Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). The
+// following operations are related to GetBucketInventoryConfiguration:
+//
+// *
+// DeleteBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// *
+// ListBucketInventoryConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+//
+// *
+// PutBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, addOperationGetBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket containing the inventory configuration to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketInventoryConfigurationOutput struct {
+
+ // Specifies the inventory configuration.
+ InventoryConfiguration *types.InventoryConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketInventoryConfiguration",
+ }
+}
+
+// getGetBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
new file mode 100644
index 000000000..28012f7ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using an
+// object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The response describes the
+// new filter element that you can use to specify a filter to select a subset of
+// objects to which the rule applies. If you are using a previous version of the
+// lifecycle configuration, it still works. For the earlier API description, see
+// GetBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+// Returns the lifecycle configuration information set on the bucket. For
+// information about lifecycle configuration, see Object Lifecycle Management
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). To
+// use this operation, you must have permission to perform the
+// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// * Error code:
+// NoSuchLifecycleConfiguration
+//
+// * Description: The lifecycle configuration does
+// not exist.
+//
+// * HTTP Status Code: 404 Not Found
+//
+// * SOAP Fault Code Prefix:
+// Client
+//
+// The following operations are related to
+// GetBucketLifecycleConfiguration:
+//
+// * GetBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+//
+// *
+// PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+// *
+// DeleteBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, addOperationGetBucketLifecycleConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLifecycleConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLifecycleConfigurationInput struct {
+
+ // The name of the bucket for which to get the lifecycle information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+
+ // Container for a lifecycle rule.
+ Rules []types.LifecycleRule
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketLifecycleConfiguration",
+ }
+}
+
+// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLifecycleConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
new file mode 100644
index 000000000..93df58f4e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
@@ -0,0 +1,237 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// Returns the Region the bucket resides in. You set the bucket's Region using the
+// LocationConstraint request parameter in a CreateBucket request. For more
+// information, see CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). To use
+// this implementation of the operation, you must be the bucket owner. The
+// following operations are related to GetBucketLocation:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) {
+ if params == nil {
+ params = &GetBucketLocationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, addOperationGetBucketLocationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLocationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLocationInput struct {
+
+ // The name of the bucket for which to get the location.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketLocationOutput struct {
+
+ // Specifies the Region where the bucket resides. For a list of all the Amazon S3
+ // supported location constraints by Region, see Regions and Endpoints
+ // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). Buckets in
+ // Region us-east-1 have a LocationConstraint of null.
+ LocationConstraint types.BucketLocationConstraint
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapDeserializerHelper(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLocation_custom struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+ }
+ output := &GetBucketLocationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{})
+ err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+ if err == io.EOF {
+ err = nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// Helper to swap in a custom deserializer
+func swapDeserializerHelper(stack *middleware.Stack) error {
+ _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketLocation",
+ }
+}
+
+// getGetBucketLocationBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getGetBucketLocationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLocationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLocationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
new file mode 100644
index 000000000..b38234cf4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
@@ -0,0 +1,168 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the logging status of a bucket and the permissions users have to view
+// and modify that status. To use GET, you must be the bucket owner. The following
+// operations are related to GetBucketLogging:
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// PutBucketLogging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html)
+func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) {
+ if params == nil {
+ params = &GetBucketLoggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, addOperationGetBucketLoggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLoggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLoggingInput struct {
+
+ // The bucket name for which to get the logging information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketLoggingOutput struct {
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to all log
+ // object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in
+ // the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *types.LoggingEnabled
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketLogging",
+ }
+}
+
+// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLoggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLoggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
new file mode 100644
index 000000000..a6f8a5b78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
@@ -0,0 +1,190 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets a metrics configuration (specified by the metrics configuration ID) from
+// the bucket. Note that this doesn't include the daily storage metrics. To use
+// this operation, you must have permissions to perform the
+// s3:GetMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+// The following operations are related to GetBucketMetricsConfiguration:
+//
+// *
+// PutBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// *
+// DeleteBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// *
+// ListBucketMetricsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// *
+// Monitoring Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
+func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, addOperationGetBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket containing the metrics configuration to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketMetricsConfigurationOutput struct {
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *types.MetricsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketMetricsConfiguration",
+ }
+}
+
+// getGetBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
new file mode 100644
index 000000000..40f29d365
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
@@ -0,0 +1,182 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the notification configuration of a bucket. If notifications are not
+// enabled on the bucket, the operation returns an empty NotificationConfiguration
+// element. By default, you must be the bucket owner to read the notification
+// configuration of a bucket. However, the bucket owner can use a bucket policy to
+// grant permission to other users to read this configuration with the
+// s3:GetBucketNotification permission. For more information about setting and
+// reading the notification configuration on a bucket, see Setting Up Notification
+// of Bucket Events
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). For
+// more information about bucket policies, see Using Bucket Policies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The
+// following operation is related to GetBucketNotification:
+//
+// *
+// PutBucketNotification
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html)
+func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketNotificationConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, addOperationGetBucketNotificationConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketNotificationConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketNotificationConfigurationInput struct {
+
+ // The name of the bucket for which to get the notification configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+// A container for specifying the notification configuration of the bucket. If this
+// element is empty, notifications are turned off for the bucket.
+type GetBucketNotificationConfigurationOutput struct {
+
+ // Describes the AWS Lambda functions to invoke and the events for which to invoke
+ // them.
+ LambdaFunctionConfigurations []types.LambdaFunctionConfiguration
+
+ // The Amazon Simple Queue Service queues to publish messages to and the events for
+ // which to publish messages.
+ QueueConfigurations []types.QueueConfiguration
+
+ // The topic to which notifications are sent and the events for which notifications
+ // are generated.
+ TopicConfigurations []types.TopicConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketNotificationConfiguration",
+ }
+}
+
+// getGetBucketNotificationConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketNotificationConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
new file mode 100644
index 000000000..05e94b5d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
@@ -0,0 +1,168 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:GetBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see Specifying Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// For information about Amazon S3 Object Ownership, see Using Object Ownership
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html).
+// The following operations are related to GetBucketOwnershipControls:
+//
+// *
+// PutBucketOwnershipControls
+//
+// * DeleteBucketOwnershipControls
+func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &GetBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, addOperationGetBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketOwnershipControlsInput struct {
+
+ // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketOwnershipControlsOutput struct {
+
+ // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in effect
+ // for this Amazon S3 bucket.
+ OwnershipControls *types.OwnershipControls
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketOwnershipControls",
+ }
+}
+
+// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting
+// a provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
new file mode 100644
index 000000000..e43b4e885
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
@@ -0,0 +1,170 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the policy of a specified bucket. If you are using an identity other
+// than the root user of the AWS account that owns the bucket, the calling identity
+// must have the GetBucketPolicy permissions on the specified bucket and belong to
+// the bucket owner's account in order to use this operation. If you don't have
+// GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you
+// have the correct permissions, but you're not using an identity that belongs to
+// the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error. As
+// a security precaution, the root user of the AWS account that owns a bucket can
+// always use this operation, even if the policy explicitly denies the root user
+// the ability to perform this action. For more information about bucket policies,
+// see Using Bucket Policies and User Policies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The
+// following operation is related to GetBucketPolicy:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) {
+ if params == nil {
+ params = &GetBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, addOperationGetBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketPolicyInput struct {
+
+ // The bucket name for which to get the bucket policy.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketPolicyOutput struct {
+
+ // The bucket policy as a JSON document.
+ Policy *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketPolicy",
+ }
+}
+
+// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
new file mode 100644
index 000000000..aef967623
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
@@ -0,0 +1,180 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
+// bucket is public. In order to use this operation, you must have the
+// s3:GetBucketPolicyStatus permission. For more information about Amazon S3
+// permissions, see Specifying Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// For more information about when Amazon S3 considers a bucket public, see The
+// Meaning of "Public"
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+// The following operations are related to GetBucketPolicyStatus:
+//
+// * Using Amazon
+// S3 Block Public Access
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// *
+// GetPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// *
+// PutPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// *
+// DeletePublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) {
+ if params == nil {
+ params = &GetBucketPolicyStatusInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, addOperationGetBucketPolicyStatusMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketPolicyStatusOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketPolicyStatusInput struct {
+
+ // The name of the Amazon S3 bucket whose policy status you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketPolicyStatusOutput struct {
+
+ // The policy status for the specified bucket.
+ PolicyStatus *types.PolicyStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketPolicyStatus",
+ }
+}
+
+// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyStatusInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyStatusBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
new file mode 100644
index 000000000..ead3f563d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
@@ -0,0 +1,179 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the replication configuration of a bucket. It can take a while to
+// propagate the put or delete a replication configuration to all Amazon S3
+// systems. Therefore, a get request soon after put or delete can return a wrong
+// result. For information about replication configuration, see Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+// Simple Storage Service Developer Guide. This operation requires permissions for
+// the s3:GetReplicationConfiguration action. For more information about
+// permissions, see Using Bucket Policies and User Policies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). If
+// you include the Filter element in a replication configuration, you must also
+// include the DeleteMarkerReplication and Priority elements. The response also
+// returns those elements. For information about GetBucketReplication errors, see
+// List of replication-related error codes
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+// The following operations are related to GetBucketReplication:
+//
+// *
+// PutBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// *
+// DeleteBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) {
+ if params == nil {
+ params = &GetBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, addOperationGetBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketReplicationInput struct {
+
+ // The bucket name for which to get the replication information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketReplicationOutput struct {
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ ReplicationConfiguration *types.ReplicationConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketReplication",
+ }
+}
+
+// getGetBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
new file mode 100644
index 000000000..4551dc842
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
@@ -0,0 +1,163 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the request payment configuration of a bucket. To use this version of
+// the operation, you must be the bucket owner. For more information, see Requester
+// Pays Buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The
+// following operations are related to GetBucketRequestPayment:
+//
+// * ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
+func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) {
+ if params == nil {
+ params = &GetBucketRequestPaymentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, addOperationGetBucketRequestPaymentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketRequestPaymentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketRequestPaymentInput struct {
+
+ // The name of the bucket for which to get the payment request configuration
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketRequestPaymentOutput struct {
+
+ // Specifies who pays for the download and request fees.
+ Payer types.Payer
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketRequestPayment",
+ }
+}
+
+// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketRequestPaymentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketRequestPaymentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
new file mode 100644
index 000000000..013c1ad0c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
@@ -0,0 +1,177 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the tag set associated with the bucket. To use this operation, you must
+// have permission to perform the s3:GetBucketTagging action. By default, the
+// bucket owner has this permission and can grant this permission to others.
+// GetBucketTagging has the following special error:
+//
+// * Error code:
+// NoSuchTagSetError
+//
+// * Description: There is no tag set associated with the
+// bucket.
+//
+// The following operations are related to GetBucketTagging:
+//
+// *
+// PutBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
+//
+// *
+// DeleteBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) {
+ if params == nil {
+ params = &GetBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, addOperationGetBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketTaggingInput struct {
+
+ // The name of the bucket for which to get the tagging information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketTaggingOutput struct {
+
+ // Contains the tag set.
+ //
+ // This member is required.
+ TagSet []types.Tag
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketTagging",
+ }
+}
+
+// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
new file mode 100644
index 000000000..4e2346777
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
@@ -0,0 +1,177 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the versioning state of a bucket. To retrieve the versioning state of a
+// bucket, you must be the bucket owner. This implementation also returns the MFA
+// Delete status of the versioning state. If the MFA Delete status is enabled, the
+// bucket owner must use an authentication device to change the versioning state of
+// the bucket. The following operations are related to GetBucketVersioning:
+//
+// *
+// GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) {
+ if params == nil {
+ params = &GetBucketVersioningInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, addOperationGetBucketVersioningMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketVersioningOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketVersioningInput struct {
+
+ // The name of the bucket for which to get the versioning information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketVersioningOutput struct {
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA delete.
+ // If the bucket has never been so configured, this element is not returned.
+ MFADelete types.MFADeleteStatus
+
+ // The versioning state of the bucket.
+ Status types.BucketVersioningStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketVersioning",
+ }
+}
+
+// getGetBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketVersioningInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketVersioningBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
new file mode 100644
index 000000000..383e04ccd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
@@ -0,0 +1,181 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the website configuration for a bucket. To host website on Amazon S3,
+// you can configure a bucket as website by adding a website configuration. For
+// more information about hosting websites, see Hosting Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This GET
+// operation requires the S3:GetBucketWebsite permission. By default, only the
+// bucket owner can read the bucket website configuration. However, bucket owners
+// can allow other users to read the website configuration by writing a bucket
+// policy granting them the S3:GetBucketWebsite permission. The following
+// operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// *
+// PutBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &GetBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, addOperationGetBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketWebsiteInput struct {
+
+ // The bucket name for which to get the website configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetBucketWebsiteOutput struct {
+
+ // The object key name of the website error document to use for 4XX class errors.
+ ErrorDocument *types.ErrorDocument
+
+ // The name of the index document for the website (for example index.html).
+ IndexDocument *types.IndexDocument
+
+ // Specifies the redirect behavior of all requests to a website endpoint of an
+ // Amazon S3 bucket.
+ RedirectAllRequestsTo *types.RedirectAllRequestsTo
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []types.RoutingRule
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketWebsite",
+ }
+}
+
+// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
new file mode 100644
index 000000000..af1c85f6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
@@ -0,0 +1,516 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "time"
+)
+
+// Retrieves objects from Amazon S3. To use GET, you must have READ access to the
+// object. If you grant READ access to the anonymous user, you can return the
+// object without using an authorization header. An Amazon S3 bucket has no
+// directory hierarchy such as you would find in a typical computer file system.
+// You can, however, create a logical hierarchy by using object key names that
+// imply a folder structure. For example, instead of naming an object sample.jpg,
+// you can name it photos/2006/February/sample.jpg. To get an object from such a
+// logical hierarchy, specify the full key name for the object in the GET
+// operation. For a virtual hosted-style request example, if you have the object
+// photos/2006/February/sample.jpg, specify the resource as
+// /photos/2006/February/sample.jpg. For a path-style request example, if you have
+// the object photos/2006/February/sample.jpg in the bucket named examplebucket,
+// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more
+// information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+// To distribute large files to many people, you can save bandwidth costs by using
+// BitTorrent. For more information, see Amazon S3 Torrent
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). For more
+// information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). If the
+// object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive
+// storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
+// Archive tiers, before you can retrieve the object you must first restore a copy
+// using RestoreObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this operation returns an InvalidObjectStateError error. For
+// information about restoring archived objects, see Restoring Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+// Encryption request headers, like x-amz-server-side-encryption, should not be
+// sent for GET requests if your object uses server-side encryption with CMKs
+// stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error. If you encrypt an object by using server-side
+// encryption with customer-provided encryption keys (SSE-C) when you store the
+// object in Amazon S3, then when you GET the object, you must use the following
+// headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C,
+// see Server-Side Encryption (Using Customer-Provided Encryption Keys)
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+// Assuming you have permission to read object tags (permission for the
+// s3:GetObjectVersionTagging action), the response also returns the
+// x-amz-tagging-count header that provides the count of number of tags associated
+// with the object. You can use GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to
+// retrieve the tag set associated with an object. Permissions You need the
+// s3:GetObject permission for this operation. For more information, see Specifying
+// Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If
+// the object you request does not exist, the error Amazon S3 returns depends on
+// whether you also have the s3:ListBucket permission.
+//
+// * If you have the
+// s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status
+// code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket
+// permission, Amazon S3 will return an HTTP status code 403 ("access denied")
+// error.
+//
+// Versioning By default, the GET operation returns the current version of
+// an object. To return a different version, use the versionId subresource. If the
+// current version of the object is a delete marker, Amazon S3 behaves as if the
+// object was deleted and includes x-amz-delete-marker: true in the response. For
+// more information about versioning, see PutBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+// Overriding Response Header Values There are times when you want to override
+// certain response header values in a GET response. For example, you might
+// override the Content-Disposition response header value in your GET request. You
+// can override values for a set of response headers using the following query
+// parameters. These response header values are sent only on a successful request,
+// that is, when status code 200 OK is returned. The set of headers you can
+// override using these parameters is a subset of the headers that Amazon S3
+// accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values in
+// the GET response, you use the following request parameters. You must sign the
+// request, either using an Authorization header or a presigned URL, when using
+// these parameters. They cannot be used with an unsigned (anonymous) request.
+//
+// *
+// response-content-type
+//
+// * response-content-language
+//
+// * response-expires
+//
+// *
+// response-cache-control
+//
+// * response-content-disposition
+//
+// *
+// response-content-encoding
+//
+// Additional Considerations about Request Headers If
+// both of the If-Match and If-Unmodified-Since headers are present in the request
+// as follows: If-Match condition evaluates to true, and; If-Unmodified-Since
+// condition evaluates to false; then, S3 returns 200 OK and the data requested. If
+// both of the If-None-Match and If-Modified-Since headers are present in the
+// request as follows: If-None-Match condition evaluates to false, and;
+// If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified
+// response code. For more information about conditional requests, see RFC 7232
+// (https://tools.ietf.org/html/rfc7232). The following operations are related to
+// GetObject:
+//
+// * ListBuckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+// *
+// GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) {
+ if params == nil {
+ params = &GetObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, addOperationGetObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectInput struct {
+
+ // The bucket name containing the object. When using this API with an access point,
+ // you must direct requests to the access point hostname. The access point hostname
+ // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key of the object to get.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Return the object only if its entity tag (ETag) is the same as the one
+ // specified, otherwise return a 412 (precondition failed).
+ IfMatch *string
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time
+
+ // Part number of the object being read. This is a positive integer between 1 and
+ // 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber int32
+
+ // Downloads the specified range bytes of an object. For more information about the
+ // HTTP Range header, see
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). Amazon S3
+ // doesn't support retrieving multiple ranges of data per GET request.
+ Range *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string
+
+ // Sets the Content-Disposition header of the response
+ ResponseContentDisposition *string
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type GetObjectOutput struct {
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // Object data.
+ Body io.ReadCloser
+
+ // Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ // with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes.
+ ContentLength int64
+
+ // The portion of the object returned in the response.
+ ContentRange *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker bool
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Last modified date of the object
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ //
+ // Map keys will be normalized to lower-case.
+ Metadata map[string]string
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP, you
+ // can create metadata whose values are not legal HTTP headers.
+ MissingMeta int32
+
+ // Indicates whether this object has an active legal hold. This field is only
+ // returned if you have permission to view an object's legal hold status.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode currently in place for this object.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when this object's Object Lock will expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has.
+ PartsCount int32
+
+ // Amazon S3 can return this if your request involves a bucket that is either a
+ // source or destination in a replication rule.
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Provides information about object restoration operation and expiration time of
+ // the restored object copy.
+ Restore *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects.
+ StorageClass types.StorageClass
+
+ // The number of tags, if any, on the object.
+ TagCount int32
+
+ // Version of the object.
+ VersionId *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ WebsiteRedirectLocation *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObject",
+ }
+}
+
+// getGetObjectBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getGetObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
+
+// PresignGetObject is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &GetObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns,
+ addOperationGetObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addGetObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
new file mode 100644
index 000000000..1cb1b7ad3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
@@ -0,0 +1,202 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the access control list (ACL) of an object. To use this operation, you
+// must have READ_ACP access to the object. This action is not supported by Amazon
+// S3 on Outposts. Versioning By default, GET returns ACL information about the
+// current version of an object. To return ACL information about a different
+// version, use the versionId subresource. The following operations are related to
+// GetObjectAcl:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// *
+// PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) {
+ if params == nil {
+ params = &GetObjectAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, addOperationGetObjectAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectAclInput struct {
+
+ // The bucket name that contains the object for which to get the ACL information.
+ // When using this API with an access point, you must direct requests to the access
+ // point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key of the object for which to get the ACL information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type GetObjectAclOutput struct {
+
+ // A list of grants.
+ Grants []types.Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *types.Owner
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectAcl",
+ }
+}
+
+// getGetObjectAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetObjectAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
new file mode 100644
index 000000000..0172da80c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
@@ -0,0 +1,182 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets an object's current Legal Hold status. For more information, see Locking
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This
+// action is not supported by Amazon S3 on Outposts.
+func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) {
+ if params == nil {
+ params = &GetObjectLegalHoldInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, addOperationGetObjectLegalHoldMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectLegalHoldOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectLegalHoldInput struct {
+
+ // The bucket name containing the object whose Legal Hold status you want to
+ // retrieve. When using this API with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object whose Legal Hold status you want to retrieve.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // The version ID of the object whose Legal Hold status you want to retrieve.
+ VersionId *string
+}
+
+type GetObjectLegalHoldOutput struct {
+
+ // The current Legal Hold status for the specified object.
+ LegalHold *types.ObjectLockLegalHold
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectLegalHold",
+ }
+}
+
+// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectLegalHoldInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectLegalHoldBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
new file mode 100644
index 000000000..5d2a4a682
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
@@ -0,0 +1,167 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets the Object Lock configuration for a bucket. The rule specified in the
+// Object Lock configuration will be applied by default to every new object placed
+// in the specified bucket. For more information, see Locking Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) {
+ if params == nil {
+ params = &GetObjectLockConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, addOperationGetObjectLockConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectLockConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectLockConfigurationInput struct {
+
+ // The bucket whose Object Lock configuration you want to retrieve. When using this
+ // API with an access point, you must direct requests to the access point hostname.
+ // The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type GetObjectLockConfigurationOutput struct {
+
+ // The specified bucket's Object Lock configuration.
+ ObjectLockConfiguration *types.ObjectLockConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectLockConfiguration",
+ }
+}
+
+// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectLockConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectLockConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
new file mode 100644
index 000000000..ce361486a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
@@ -0,0 +1,182 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves an object's retention settings. For more information, see Locking
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This
+// action is not supported by Amazon S3 on Outposts.
+func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) {
+ if params == nil {
+ params = &GetObjectRetentionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, addOperationGetObjectRetentionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectRetentionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectRetentionInput struct {
+
+ // The bucket name containing the object whose retention settings you want to
+ // retrieve. When using this API with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object whose retention settings you want to retrieve.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // The version ID for the object whose retention settings you want to retrieve.
+ VersionId *string
+}
+
+type GetObjectRetentionOutput struct {
+
+ // The container element for an object's retention settings.
+ Retention *types.ObjectLockRetention
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectRetention",
+ }
+}
+
+// getGetObjectRetentionBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectRetentionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectRetentionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
new file mode 100644
index 000000000..de4098aaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
@@ -0,0 +1,198 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object. To use this operation, you must have
+// permission to perform the s3:GetObjectTagging action. By default, the GET
+// operation returns information about current version of an object. For a
+// versioned bucket, you can have multiple versions of an object in your bucket. To
+// retrieve tags of any other version, use the versionId query parameter. You also
+// need permission for the s3:GetObjectVersionTagging action. By default, the
+// bucket owner has this permission and can grant this permission to others. For
+// information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The
+// following operation is related to GetObjectTagging:
+//
+// * PutObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) {
+ if params == nil {
+ params = &GetObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, addOperationGetObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectTaggingInput struct {
+
+ // The bucket name containing the object for which to get the tagging information.
+ // When using this API with an access point, you must direct requests to the access
+ // point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which to get the tagging information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The versionId of the object for which to get the tagging information.
+ VersionId *string
+}
+
+type GetObjectTaggingOutput struct {
+
+ // Contains the tag set.
+ //
+ // This member is required.
+ TagSet []types.Tag
+
+ // The versionId of the object for which you got the tagging information.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectTagging",
+ }
+}
+
+// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
new file mode 100644
index 000000000..0529c9462
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
@@ -0,0 +1,182 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// Returns torrent files from a bucket. BitTorrent can save you bandwidth when
+// you're distributing large files. For more information about BitTorrent, see
+// Using BitTorrent with Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). You can get
+// torrent only for objects that are less than 5 GB in size, and that are not
+// encrypted using server-side encryption with a customer-provided encryption key.
+// To use GET, you must have READ access to the object. This action is not
+// supported by Amazon S3 on Outposts. The following operation is related to
+// GetObjectTorrent:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) {
+ if params == nil {
+ params = &GetObjectTorrentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, addOperationGetObjectTorrentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectTorrentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectTorrentInput struct {
+
+ // The name of the bucket containing the object for which to get the torrent files.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The object key for which to get the information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+}
+
+type GetObjectTorrentOutput struct {
+
+ // A Bencoded dictionary as defined by the BitTorrent specification
+ Body io.ReadCloser
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetObjectTorrent",
+ }
+}
+
+// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectTorrentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectTorrentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
new file mode 100644
index 000000000..38d82ac52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
@@ -0,0 +1,187 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For
+// more information about Amazon S3 permissions, see Specifying Permissions in a
+// Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
+// PublicAccessBlock settings are different between the bucket and the account,
+// Amazon S3 uses the most restrictive combination of the bucket-level and
+// account-level settings. For more information about when Amazon S3 considers a
+// bucket or an object public, see The Meaning of "Public"
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+// The following operations are related to GetPublicAccessBlock:
+//
+// * Using Amazon S3
+// Block Public Access
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// *
+// PutPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// *
+// GetPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// *
+// DeletePublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) {
+	// Treat a nil params as an empty input so invocation never dereferences nil.
+	if params == nil {
+		params = &GetPublicAccessBlockInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, addOperationGetPublicAccessBlockMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetPublicAccessBlockOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetPublicAccessBlockInput contains the request parameters for the
+// GetPublicAccessBlock operation.
+type GetPublicAccessBlockInput struct {
+
+	// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want
+	// to retrieve.
+	//
+	// This member is required.
+	Bucket *string
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied) error.
+	ExpectedBucketOwner *string
+}
+
+// GetPublicAccessBlockOutput contains the response from the
+// GetPublicAccessBlock operation.
+type GetPublicAccessBlockOutput struct {
+
+	// The PublicAccessBlock configuration currently in effect for this Amazon S3
+	// bucket.
+	PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+}
+
+// addOperationGetPublicAccessBlockMiddlewares wires the operation's serializer
+// and deserializer plus the shared client middleware — logging, request IDs,
+// content length, endpoint resolution, payload hashing, retries, SigV4 signing,
+// response metadata/timing, user agent, body closing, input validation, service
+// metadata, S3 endpoint customizations, and request/response logging — onto the
+// middleware stack. The first failure aborts registration and is returned.
+func addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// newServiceMetadataMiddleware_opGetPublicAccessBlock returns the middleware
+// that registers per-operation service metadata (region, service ID, signing
+// name, operation name) consumed by downstream middleware.
+func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "GetPublicAccessBlock",
+	}
+}
+
+// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetPublicAccessBlockInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+
+// addGetPublicAccessBlockUpdateEndpoint installs the S3 endpoint-customization
+// middleware for the GetPublicAccessBlock operation.
+func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getGetPublicAccessBlockBucketMember,
+		},
+		UsePathStyle:            options.UsePathStyle,
+		UseAccelerate:           options.UseAccelerate,
+		SupportsAccelerate:      true,
+		EndpointResolver:        options.EndpointResolver,
+		EndpointResolverOptions: options.EndpointOptions,
+		UseDualstack:            options.UseDualstack,
+		UseARNRegion:            options.UseARNRegion,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
new file mode 100644
index 000000000..52d855e10
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
@@ -0,0 +1,474 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ smithywaiter "github.com/aws/smithy-go/waiter"
+ "time"
+)
+
+// This operation is useful to determine if a bucket exists and you have permission
+// to access it. The operation returns a 200 OK if the bucket exists and you have
+// permission to access it. Otherwise, the operation might return responses such as
+// 404 Not Found and 403 Forbidden. To use this operation, you must have
+// permissions to perform the s3:ListBucket action. The bucket owner has this
+// permission by default and can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) {
+	// Treat a nil params as an empty input so invocation never dereferences nil.
+	if params == nil {
+		params = &HeadBucketInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, addOperationHeadBucketMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*HeadBucketOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// HeadBucketInput contains the request parameters for the HeadBucket operation.
+type HeadBucketInput struct {
+
+	// The bucket name. When using this API with an access point, you must direct
+	// requests to the access point hostname. The access point hostname takes the form
+	// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+	// operation with an access point through the AWS SDKs, you provide the access
+	// point ARN in place of the bucket name. For more information about access point
+	// ARNs, see Using Access Points
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+	// the Amazon Simple Storage Service Developer Guide. When using this API with
+	// Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+	// The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+	// this operation using S3 on Outposts through the AWS SDKs, you provide the
+	// Outposts bucket ARN in place of the bucket name. For more information about S3
+	// on Outposts ARNs, see Using S3 on Outposts
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+	// Amazon Simple Storage Service Developer Guide.
+	//
+	// This member is required.
+	Bucket *string
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied) error.
+	ExpectedBucketOwner *string
+}
+
+// HeadBucketOutput contains the response from the HeadBucket operation, which
+// has no modeled body — success is conveyed by the HTTP status alone.
+type HeadBucketOutput struct {
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+}
+
+// addOperationHeadBucketMiddlewares wires the operation's serializer and
+// deserializer plus the shared client middleware — logging, request IDs,
+// content length, endpoint resolution, payload hashing, retries, SigV4 signing,
+// response metadata/timing, user agent, body closing, input validation, service
+// metadata, S3 endpoint customizations, and request/response logging — onto the
+// middleware stack. The first failure aborts registration and is returned.
+func addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpHeadBucketValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addHeadBucketUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// HeadBucketAPIClient is a client that implements the HeadBucket operation.
+type HeadBucketAPIClient interface {
+	HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error)
+}
+
+// Compile-time check that *Client satisfies HeadBucketAPIClient.
+var _ HeadBucketAPIClient = (*Client)(nil)
+
+// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter
+type BucketExistsWaiterOptions struct {
+
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// MinDelay is the minimum amount of time to delay between retries. If unset,
+	// BucketExistsWaiter will use default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+	MinDelay time.Duration
+
+	// MaxDelay is the maximum amount of time to delay between retries. If unset or set
+	// to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note that
+	// MaxDelay must resolve to value greater than or equal to the MinDelay.
+	MaxDelay time.Duration
+
+	// LogWaitAttempts is used to enable logging for waiter retry attempts
+	LogWaitAttempts bool
+
+	// Retryable is function that can be used to override the service defined
+	// waiter-behavior based on operation output, or returned error. This function is
+	// used by the waiter to decide if a state is retryable or a terminal state. By
+	// default service-modeled logic will populate this option. This option can thus be
+	// used to define a custom waiter state with fall-back to service-modeled waiter
+	// state mutators. The function returns an error in case of a failure state. In
+	// case of retry state, this function returns a bool value of true and nil error,
+	// while in case of success it returns a bool value of false and nil error.
+	Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
+}
+
+// BucketExistsWaiter defines the waiters for BucketExists
+type BucketExistsWaiter struct {
+	client HeadBucketAPIClient
+
+	options BucketExistsWaiterOptions
+}
+
+// NewBucketExistsWaiter constructs a BucketExistsWaiter.
+func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter {
+	// Seed service-modeled defaults, then let callers override them.
+	options := BucketExistsWaiterOptions{}
+	options.MinDelay = 5 * time.Second
+	options.MaxDelay = 120 * time.Second
+	options.Retryable = bucketExistsStateRetryable
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	return &BucketExistsWaiter{
+		client:  client,
+		options: options,
+	}
+}
+
+// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error {
+	if maxWaitDur <= 0 {
+		return fmt.Errorf("maximum wait time for waiter must be greater than zero")
+	}
+
+	// Copy the options so per-call overrides never leak back into the waiter.
+	options := w.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.MaxDelay <= 0 {
+		options.MaxDelay = 120 * time.Second
+	}
+
+	if options.MinDelay > options.MaxDelay {
+		return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+	}
+
+	// Bound the entire wait (including in-flight requests) via context timeout.
+	ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+	defer cancelFn()
+
+	logger := smithywaiter.Logger{}
+	remainingTime := maxWaitDur
+
+	var attempt int64
+	for {
+
+		attempt++
+		apiOptions := options.APIOptions
+		start := time.Now()
+
+		if options.LogWaitAttempts {
+			logger.Attempt = attempt
+			// Copy into a fresh slice so appending the logger does not mutate
+			// the shared options.APIOptions backing array across attempts.
+			apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+			apiOptions = append(apiOptions, logger.AddLogger)
+		}
+
+		out, err := w.client.HeadBucket(ctx, params, func(o *Options) {
+			o.APIOptions = append(o.APIOptions, apiOptions...)
+		})
+
+		// Retryable decides: (false, nil) terminal success, (true, nil) retry,
+		// non-nil error terminal failure.
+		retryable, err := options.Retryable(ctx, params, out, err)
+		if err != nil {
+			return err
+		}
+		if !retryable {
+			return nil
+		}
+
+		// Charge the elapsed request time against the overall budget; stop if
+		// there is not even MinDelay left to wait before the next attempt.
+		remainingTime -= time.Since(start)
+		if remainingTime < options.MinDelay || remainingTime <= 0 {
+			break
+		}
+
+		// compute exponential backoff between waiter retries
+		delay, err := smithywaiter.ComputeDelay(
+			attempt, options.MinDelay, options.MaxDelay, remainingTime,
+		)
+		if err != nil {
+			return fmt.Errorf("error computing waiter delay, %w", err)
+		}
+
+		remainingTime -= delay
+		// sleep for the delay amount before invoking a request
+		if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+			return fmt.Errorf("request cancelled while waiting, %w", err)
+		}
+	}
+	return fmt.Errorf("exceeded max wait time for BucketExists waiter")
+}
+
+func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) {
+
+ if err == nil {
+ return false, nil
+ }
+
+ if err != nil {
+ var errorType *types.NotFound
+ if errors.As(err, &errorType) {
+ return true, nil
+ }
+ }
+
+ return true, nil
+}
+
+// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter
+type BucketNotExistsWaiterOptions struct {
+
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// MinDelay is the minimum amount of time to delay between retries. If unset,
+	// BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+	MinDelay time.Duration
+
+	// MaxDelay is the maximum amount of time to delay between retries. If unset or set
+	// to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. Note
+	// that MaxDelay must resolve to value greater than or equal to the MinDelay.
+	MaxDelay time.Duration
+
+	// LogWaitAttempts is used to enable logging for waiter retry attempts
+	LogWaitAttempts bool
+
+	// Retryable is function that can be used to override the service defined
+	// waiter-behavior based on operation output, or returned error. This function is
+	// used by the waiter to decide if a state is retryable or a terminal state. By
+	// default service-modeled logic will populate this option. This option can thus be
+	// used to define a custom waiter state with fall-back to service-modeled waiter
+	// state mutators. The function returns an error in case of a failure state. In
+	// case of retry state, this function returns a bool value of true and nil error,
+	// while in case of success it returns a bool value of false and nil error.
+	Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
+}
+
+// BucketNotExistsWaiter defines the waiters for BucketNotExists
+type BucketNotExistsWaiter struct {
+	client HeadBucketAPIClient
+
+	options BucketNotExistsWaiterOptions
+}
+
+// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter.
+func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter {
+	// Seed service-modeled defaults, then let callers override them.
+	options := BucketNotExistsWaiterOptions{}
+	options.MinDelay = 5 * time.Second
+	options.MaxDelay = 120 * time.Second
+	options.Retryable = bucketNotExistsStateRetryable
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	return &BucketNotExistsWaiter{
+		client:  client,
+		options: options,
+	}
+}
+
+// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error {
+	if maxWaitDur <= 0 {
+		return fmt.Errorf("maximum wait time for waiter must be greater than zero")
+	}
+
+	// Copy the options so per-call overrides never leak back into the waiter.
+	options := w.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.MaxDelay <= 0 {
+		options.MaxDelay = 120 * time.Second
+	}
+
+	if options.MinDelay > options.MaxDelay {
+		return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+	}
+
+	// Bound the entire wait (including in-flight requests) via context timeout.
+	ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+	defer cancelFn()
+
+	logger := smithywaiter.Logger{}
+	remainingTime := maxWaitDur
+
+	var attempt int64
+	for {
+
+		attempt++
+		apiOptions := options.APIOptions
+		start := time.Now()
+
+		if options.LogWaitAttempts {
+			logger.Attempt = attempt
+			// Copy into a fresh slice so appending the logger does not mutate
+			// the shared options.APIOptions backing array across attempts.
+			apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+			apiOptions = append(apiOptions, logger.AddLogger)
+		}
+
+		out, err := w.client.HeadBucket(ctx, params, func(o *Options) {
+			o.APIOptions = append(o.APIOptions, apiOptions...)
+		})
+
+		// Retryable decides: (false, nil) terminal success, (true, nil) retry,
+		// non-nil error terminal failure.
+		retryable, err := options.Retryable(ctx, params, out, err)
+		if err != nil {
+			return err
+		}
+		if !retryable {
+			return nil
+		}
+
+		// Charge the elapsed request time against the overall budget; stop if
+		// there is not even MinDelay left to wait before the next attempt.
+		remainingTime -= time.Since(start)
+		if remainingTime < options.MinDelay || remainingTime <= 0 {
+			break
+		}
+
+		// compute exponential backoff between waiter retries
+		delay, err := smithywaiter.ComputeDelay(
+			attempt, options.MinDelay, options.MaxDelay, remainingTime,
+		)
+		if err != nil {
+			return fmt.Errorf("error computing waiter delay, %w", err)
+		}
+
+		remainingTime -= delay
+		// sleep for the delay amount before invoking a request
+		if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+			return fmt.Errorf("request cancelled while waiting, %w", err)
+		}
+	}
+	return fmt.Errorf("exceeded max wait time for BucketNotExists waiter")
+}
+
+// bucketNotExistsStateRetryable implements the BucketNotExists waiter acceptor
+// logic: a *types.NotFound error is the success state (the bucket is gone);
+// any other outcome — including a successful HeadBucket — keeps retrying.
+func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) {
+
+	if err != nil {
+		var errorType *types.NotFound
+		if errors.As(err, &errorType) {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// newServiceMetadataMiddleware_opHeadBucket returns the middleware that
+// registers per-operation service metadata (region, service ID, signing name,
+// operation name) consumed by downstream middleware.
+func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "HeadBucket",
+	}
+}
+
+// getHeadBucketBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getHeadBucketBucketMember(input interface{}) (*string, bool) {
+	in := input.(*HeadBucketInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+
+// addHeadBucketUpdateEndpoint installs the S3 endpoint-customization middleware
+// for the HeadBucket operation.
+func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getHeadBucketBucketMember,
+		},
+		UsePathStyle:            options.UsePathStyle,
+		UseAccelerate:           options.UseAccelerate,
+		SupportsAccelerate:      true,
+		EndpointResolver:        options.EndpointResolver,
+		EndpointResolverOptions: options.EndpointOptions,
+		UseDualstack:            options.UseDualstack,
+		UseARNRegion:            options.UseARNRegion,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
new file mode 100644
index 000000000..52dfec90f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
@@ -0,0 +1,778 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ smithywaiter "github.com/aws/smithy-go/waiter"
+ "time"
+)
+
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object. A HEAD request
+// has the same options as a GET operation on an object. The response is identical
+// to the GET response except that there is no response body. If you encrypt an
+// object by using server-side encryption with customer-provided encryption keys
+// (SSE-C) when you store the object in Amazon S3, then when you retrieve the
+// metadata from the object, you must use the following headers:
+//
+// *
+// x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C,
+// see Server-Side Encryption (Using Customer-Provided Encryption Keys)
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+// Encryption request headers, like x-amz-server-side-encryption, should not be
+// sent for GET requests if your object uses server-side encryption with CMKs
+// stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error. Request headers are limited to 8 KB in size.
+// For more information, see Common Request Headers
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both
+// of the If-Match and If-Unmodified-Since headers are present in the request as
+// follows:
+//
+// * If-Match condition evaluates to true, and;
+//
+// * If-Unmodified-Since
+// condition evaluates to false;
+//
+// Then Amazon S3 returns 200 OK and the data
+// requested.
+//
+// * Consideration 2 – If both of the If-None-Match and
+// If-Modified-Since headers are present in the request as follows:
+//
+// *
+// If-None-Match condition evaluates to false, and;
+//
+// * If-Modified-Since condition
+// evaluates to true;
+//
+// Then Amazon S3 returns the 304 Not Modified response
+// code.
+//
+// For more information about conditional requests, see RFC 7232
+// (https://tools.ietf.org/html/rfc7232). Permissions You need the s3:GetObject
+// permission for this operation. For more information, see Specifying Permissions
+// in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If
+// the object you request does not exist, the error Amazon S3 returns depends on
+// whether you also have the s3:ListBucket permission.
+//
+// * If you have the
+// s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code
+// 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission,
+// Amazon S3 returns an HTTP status code 403 ("access denied") error.
+//
+// The
+// following operation is related to HeadObject:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) {
+ if params == nil {
+ params = &HeadObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, addOperationHeadObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*HeadObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type HeadObjectInput struct {
+
+ // The name of the bucket containing the object. When using this API with an access
+ // point, you must direct requests to the access point hostname. The access point
+ // hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Return the object only if its entity tag (ETag) is the same as the one
+ // specified, otherwise return a 412 (precondition failed).
+ IfMatch *string
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time
+
+ // Part number of the object being read. This is a positive integer between 1 and
+ // 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful querying about the size of the part and the number of parts in this
+ // object.
+ PartNumber int32
+
+ // Downloads the specified range bytes of an object. For more information about the
+ // HTTP Range header, see
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). Amazon S3
+ // doesn't support retrieving multiple ranges of data per GET request.
+ Range *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type HeadObjectOutput struct {
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The archive state of the head object.
+ ArchiveStatus types.ArchiveStatus
+
+ // Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ // with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes.
+ ContentLength int64
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker bool
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Last modified date of the object
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ //
+ // Map keys will be normalized to lower-case.
+ Metadata map[string]string
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP, you
+ // can create metadata whose values are not legal HTTP headers.
+ MissingMeta int32
+
+ // Specifies whether a legal hold is in effect for this object. This header is only
+ // returned if the requester has the s3:GetObjectLegalHold permission. This header
+ // is not returned if the specified version of this object has never had a legal
+ // hold applied. For more information about S3 Object Lock, see Object Lock
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode, if any, that's in effect for this object. This header is
+ // only returned if the requester has the s3:GetObjectRetention permission. For
+ // more information about S3 Object Lock, see Object Lock
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when the Object Lock retention period expires. This header is
+ // only returned if the requester has the s3:GetObjectRetention permission.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has.
+ PartsCount int32
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule. In replication, you have
+ // a source bucket on which you configure replication and destination bucket or
+ // buckets where Amazon S3 stores object replicas. When you request an object
+ // (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will
+ // return the x-amz-replication-status header in the response as follows:
+ //
+ // * If
+ // requesting an object from the source bucket — Amazon S3 will return the
+ // x-amz-replication-status header if the object in your request is eligible for
+ // replication. For example, suppose that in your replication configuration, you
+ // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key
+ // prefix TaxDocs. Any objects you upload with this key name prefix, for example
+ // TaxDocs/document1.pdf, are eligible for replication. For any object request with
+ // this key name prefix, Amazon S3 will return the x-amz-replication-status header
+ // with value PENDING, COMPLETED or FAILED indicating object replication status.
+ //
+ // *
+ // If requesting an object from a destination bucket — Amazon S3 will return the
+ // x-amz-replication-status header with value REPLICA if the object in your request
+ // is a replica that Amazon S3 created and there is no replica modification
+ // replication in progress.
+ //
+ // * When replicating objects to multiple destination
+ // buckets the x-amz-replication-status header acts differently. The header of the
+ // source object will only return a value of COMPLETED when replication is
+ // successful to all destinations. The header will remain at value PENDING until
+ // replication has completed for all destinations. If one or more destinations
+ // fails replication the header will return FAILED.
+ //
+ // For more information, see
+ // Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) or an
+ // archive copy is already restored. If an archive copy is already restored, the
+ // header value indicates when Amazon S3 is scheduled to delete the object copy.
+ // For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec
+ // 2012 00:00:00 GMT" If the object restoration is in progress, the header returns
+ // the value ongoing-request="true". For more information about archiving objects,
+ // see Transitioning Objects: General Considerations
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ Restore *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // If the object is stored using server-side encryption either with an AWS KMS
+ // customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ // includes this header with the value of the server-side encryption algorithm used
+ // when storing this object in Amazon S3 (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects. For more
+ // information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ StorageClass types.StorageClass
+
+ // Version of the object.
+ VersionId *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ WebsiteRedirectLocation *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpHeadObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addHeadObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// HeadObjectAPIClient is a client that implements the HeadObject operation.
+type HeadObjectAPIClient interface {
+ HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error)
+}
+
+var _ HeadObjectAPIClient = (*Client)(nil)
+
+// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter
+type ObjectExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+ // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that
+ // MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or set
+ // to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note that
+ // MaxDelay must resolve to value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+ // Retryable is function that can be used to override the service defined
+ // waiter-behavior based on operation output, or returned error. This function is
+ // used by the waiter to decide if a state is retryable or a terminal state. By
+ // default service-modeled logic will populate this option. This option can thus be
+ // used to define a custom waiter state with fall-back to service-modeled waiter
+ // state mutators.The function returns an error in case of a failure state. In case
+ // of retry state, this function returns a bool value of true and nil error, while
+ // in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
+}
+
+// ObjectExistsWaiter defines the waiters for ObjectExists
+type ObjectExistsWaiter struct {
+ client HeadObjectAPIClient
+
+ options ObjectExistsWaiterOptions
+}
+
+// NewObjectExistsWaiter constructs a ObjectExistsWaiter.
+func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter {
+ options := ObjectExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = objectExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &ObjectExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error {
+ if maxWaitDur <= 0 {
+ return fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadObject(ctx, params, func(o *Options) {
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return err
+ }
+ if !retryable {
+ return nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return fmt.Errorf("exceeded max wait time for ObjectExists waiter")
+}
+
+func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) {
+
+ if err == nil {
+ return false, nil
+ }
+
+ if err != nil {
+ var apiErr smithy.APIError
+ ok := errors.As(err, &apiErr)
+ if !ok {
+ return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err)
+ }
+
+ if "NotFound" == apiErr.ErrorCode() {
+ return true, nil
+ }
+ }
+
+ return true, nil
+}
+
+// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter
+type ObjectNotExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+ // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that
+ // MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or set
+ // to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. Note
+ // that MaxDelay must resolve to value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+ // Retryable is function that can be used to override the service defined
+ // waiter-behavior based on operation output, or returned error. This function is
+ // used by the waiter to decide if a state is retryable or a terminal state. By
+ // default service-modeled logic will populate this option. This option can thus be
+ // used to define a custom waiter state with fall-back to service-modeled waiter
+ // state mutators.The function returns an error in case of a failure state. In case
+ // of retry state, this function returns a bool value of true and nil error, while
+ // in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
+}
+
+// ObjectNotExistsWaiter defines the waiters for ObjectNotExists
+type ObjectNotExistsWaiter struct {
+ client HeadObjectAPIClient
+
+ options ObjectNotExistsWaiterOptions
+}
+
+// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter.
+func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter {
+ options := ObjectNotExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = objectNotExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &ObjectNotExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error {
+ if maxWaitDur <= 0 {
+ return fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadObject(ctx, params, func(o *Options) {
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return err
+ }
+ if !retryable {
+ return nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return fmt.Errorf("exceeded max wait time for ObjectNotExists waiter")
+}
+
+func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) {
+
+ if err != nil {
+ var apiErr smithy.APIError
+ ok := errors.As(err, &apiErr)
+ if !ok {
+ return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err)
+ }
+
+ if "NotFound" == apiErr.ErrorCode() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "HeadObject",
+ }
+}
+
+// getHeadObjectBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getHeadObjectBucketMember(input interface{}) (*string, bool) {
+	in := input.(*HeadObjectInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getHeadObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
new file mode 100644
index 000000000..68257520c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
@@ -0,0 +1,205 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket. This operation supports list pagination and
+// does not return more than 100 configurations at a time. You should always check
+// the IsTruncated element in the response. If there are no more configurations to
+// list, IsTruncated is set to false. If there are more configurations to list,
+// IsTruncated is set to true, and there will be a value in NextContinuationToken.
+// You use the NextContinuationToken value to continue the pagination of the list
+// by passing the value in continuation-token in the request to GET the next page.
+// To use this operation, you must have permissions to perform the
+// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage
+// Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+// The following operations are related to ListBucketAnalyticsConfigurations:
+//
+// *
+// GetBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// *
+// DeleteBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// *
+// PutBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, addOperationListBucketAnalyticsConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketAnalyticsConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketAnalyticsConfigurationsInput struct {
+
+ // The name of the bucket from which analytics configurations are retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type ListBucketAnalyticsConfigurationsOutput struct {
+
+ // The list of analytics configurations for a bucket.
+ AnalyticsConfigurationList []types.AnalyticsConfiguration
+
+ // The marker that is used as a starting point for this analytics configuration
+ // list response. This value is present if it was sent in the request.
+ ContinuationToken *string
+
+ // Indicates whether the returned list of analytics configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated bool
+
+ // NextContinuationToken is sent when isTruncated is true, which indicates that
+ // there are more analytics configurations to list. The next request must include
+ // this NextContinuationToken. The token is obfuscated and is not a usable value.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketAnalyticsConfigurations",
+ }
+}
+
+// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketAnalyticsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
new file mode 100644
index 000000000..1d311ab7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
@@ -0,0 +1,197 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3
+// Intelligent-Tiering storage class is designed to optimize storage costs by
+// automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead. S3 Intelligent-Tiering delivers
+// automatic cost savings by moving data between access tiers, when access patterns
+// change. The S3 Intelligent-Tiering storage class is suitable for objects larger
+// than 128 KB that you plan to store for at least 30 days. If the size of an
+// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
+// can be stored, but they are always charged at the frequent access tier rates in
+// the S3 Intelligent-Tiering storage class. If you delete an object before the end
+// of the 30-day minimum storage duration period, you are charged for 30 days. For
+// more information, see Storage class for automatically optimizing frequently and
+// infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+// Operations related to ListBucketIntelligentTieringConfigurations include:
+//
+// *
+// DeleteBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+//
+// *
+// PutBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
+//
+// *
+// GetBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketIntelligentTieringConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, addOperationListBucketIntelligentTieringConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketIntelligentTieringConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketIntelligentTieringConfigurationsInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+}
+
+type ListBucketIntelligentTieringConfigurationsOutput struct {
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The list of S3 Intelligent-Tiering configurations for a bucket.
+ IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration
+
+ // Indicates whether the returned list of analytics configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated bool
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketIntelligentTieringConfigurations",
+ }
+}
+
+// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to
+// string denoting a provided bucket member valueand a boolean indicating if the
+// input has a modeled bucket name,
+func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketIntelligentTieringConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
new file mode 100644
index 000000000..0260b091a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
@@ -0,0 +1,206 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of inventory configurations for the bucket. You can have up to
+// 1,000 analytics configurations per bucket. This operation supports list
+// pagination and does not return more than 100 configurations at a time. Always
+// check the IsTruncated element in the response. If there are no more
+// configurations to list, IsTruncated is set to false. If there are more
+// configurations to list, IsTruncated is set to true, and there is a value in
+// NextContinuationToken. You use the NextContinuationToken value to continue the
+// pagination of the list by passing the value in continuation-token in the request
+// to GET the next page. To use this operation, you must have permissions to
+// perform the s3:GetInventoryConfiguration action. The bucket owner has this
+// permission by default. The bucket owner can grant this permission to others. For
+// more information about permissions, see Permissions Related to Bucket
+// Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) The
+// following operations are related to ListBucketInventoryConfigurations:
+//
+// *
+// GetBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// *
+// DeleteBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// *
+// PutBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, addOperationListBucketInventoryConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketInventoryConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketInventoryConfigurationsInput struct {
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ ContinuationToken *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type ListBucketInventoryConfigurationsOutput struct {
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []types.InventoryConfiguration
+
+ // Tells whether the returned list of inventory configurations is complete. A value
+ // of true indicates that the list is not complete and the NextContinuationToken is
+ // provided for a subsequent request.
+ IsTruncated bool
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketInventoryConfigurations",
+ }
+}
+
+// getListBucketInventoryConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketInventoryConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
new file mode 100644
index 000000000..57927ed5a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
@@ -0,0 +1,209 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists the metrics configurations for the bucket. The metrics configurations are
+// only for the request metrics of the bucket and do not provide information on
+// daily storage metrics. You can have up to 1,000 configurations per bucket. This
+// operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false. If
+// there are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken. You use the NextContinuationToken value to
+// continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page. To use this operation, you must have
+// permissions to perform the s3:GetMetricsConfiguration action. The bucket owner
+// has this permission by default. The bucket owner can grant this permission to
+// others. For more information about permissions, see Permissions Related to
+// Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// more information about metrics configurations and CloudWatch request metrics,
+// see Monitoring Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+// The following operations are related to ListBucketMetricsConfigurations:
+//
+// *
+// PutBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// *
+// GetBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// *
+// DeleteBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketMetricsConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, addOperationListBucketMetricsConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketMetricsConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketMetricsConfigurationsInput struct {
+
+ // The name of the bucket containing the metrics configurations to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The marker that is used to continue a metrics configuration listing that has
+ // been truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ ContinuationToken *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type ListBucketMetricsConfigurationsOutput struct {
+
+ // The marker that is used as a starting point for this metrics configuration list
+ // response. This value is present if it was sent in the request.
+ ContinuationToken *string
+
+ // Indicates whether the returned list of metrics configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated bool
+
+ // The list of metrics configurations for a bucket.
+ MetricsConfigurationList []types.MetricsConfiguration
+
+ // The marker used to continue a metrics configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketMetricsConfigurations",
+ }
+}
+
+// getListBucketMetricsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketMetricsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
new file mode 100644
index 000000000..127dc70ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
@@ -0,0 +1,137 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of all buckets owned by the authenticated sender of the request.
+func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) {
+ if params == nil {
+ params = &ListBucketsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, addOperationListBucketsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketsInput struct {
+}
+
+type ListBucketsOutput struct {
+
+ // The list of buckets owned by the requestor.
+ Buckets []types.Bucket
+
+ // The owner of the buckets listed.
+ Owner *types.Owner
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBuckets",
+ }
+}
+
+func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: nopGetBucketAccessor,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
new file mode 100644
index 000000000..1ed082f82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
@@ -0,0 +1,300 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate
+// Multipart Upload request, but has not yet been completed or aborted. This
+// operation returns at most 1,000 multipart uploads in the response. 1,000
+// multipart uploads is the maximum number of uploads a response can include, which
+// is also the default value. You can further limit the number of uploads in a
+// response by specifying the max-uploads parameter in the response. If additional
+// multipart uploads satisfy the list criteria, the response will contain an
+// IsTruncated element with the value true. To list the additional multipart
+// uploads, use the key-marker and upload-id-marker request parameters. In the
+// response, the uploads are sorted by key. If your application has initiated more
+// than one multipart upload using the same object key, then uploads in the
+// response are first sorted by key. Additionally, uploads are sorted in ascending
+// order within each key by the upload initiation time. For more information on
+// multipart uploads, see Uploading Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
+// information on permissions required to use the multipart upload API, see
+// Multipart Upload API and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The
+// following operations are related to ListMultipartUploads:
+//
+// *
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
+ if params == nil {
+ params = &ListMultipartUploadsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, addOperationListMultipartUploadsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListMultipartUploadsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListMultipartUploadsInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated. When using
+ // this API with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Character you use to group keys. All keys that contain the same string between
+ // the prefix, if specified, and the first occurrence of the delimiter after the
+ // prefix are grouped under a single result element, CommonPrefixes. If you don't
+ // specify the prefix parameter, then the substring starts at the beginning of the
+ // key. The keys that are grouped under CommonPrefixes result element are not
+ // returned elsewhere in the response.
+ Delimiter *string
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies the
+ // encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response.
+ EncodingType types.EncodingType
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin. If upload-id-marker is not specified, only the
+ // keys lexicographically greater than the specified key-marker will be included in
+ // the list. If upload-id-marker is specified, any multipart uploads for a key
+ // equal to the key-marker might also be included, provided those multipart uploads
+ // have upload IDs lexicographically greater than the specified upload-id-marker.
+ KeyMarker *string
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the
+ // response body. 1,000 is the maximum number of uploads that can be returned in a
+ // response.
+ MaxUploads int32
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different grouping of
+ // keys. (You can think of using prefix to make groups in the same way you'd use a
+ // folder in a file system.)
+ Prefix *string
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter is
+ // ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker.
+ UploadIdMarker *string
+}
+
+type ListMultipartUploadsOutput struct {
+
+ // The name of the bucket to which the multipart upload was initiated.
+ Bucket *string
+
+ // If you specify a delimiter in the request, then the result returns each distinct
+ // key prefix containing the delimiter in a CommonPrefixes element. The distinct
+ // key prefixes are returned in the Prefix child element.
+ CommonPrefixes []types.CommonPrefix
+
+ // Contains the delimiter you specified in the request. If you don't specify a
+ // delimiter in your request, this element is absent from the response.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object keys in the response. If you
+ // specify encoding-type request parameter, Amazon S3 includes this element in the
+ // response, and returns encoded key name values in the following response
+ // elements: Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.
+ EncodingType types.EncodingType
+
+ // Indicates whether the returned list of multipart uploads is truncated. A value
+ // of true indicates that the list was truncated. The list can be truncated if the
+ // number of multipart uploads exceeds the limit allowed or specified by max
+ // uploads.
+ IsTruncated bool
+
+ // The key at or after which the listing began.
+ KeyMarker *string
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads int32
+
+ // When a list is truncated, this element specifies the value that should be used
+ // for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string
+
+ // When a list is truncated, this element specifies the value that should be used
+ // for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIdMarker *string
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string
+
+ // Upload ID after which listing began.
+ UploadIdMarker *string
+
+ // Container for elements related to a particular multipart upload. A response can
+ // contain zero or more Upload elements.
+ Uploads []types.MultipartUpload
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListMultipartUploads",
+ }
+}
+
+// getListMultipartUploadsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListMultipartUploadsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListMultipartUploadsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
new file mode 100644
index 000000000..752b51a04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
@@ -0,0 +1,265 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns metadata about all versions of the objects in a bucket. You can also use
+// request parameters as selection criteria to return metadata about a subset of
+// all the object versions. A 200 OK response can contain valid or invalid XML.
+// Make sure to design your application to parse the contents of the response and
+// handle it appropriately. To use this operation, you must have READ access to the
+// bucket. This action is not supported by Amazon S3 on Outposts. The following
+// operations are related to ListObjectVersions:
+//
+// * ListObjectsV2
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// *
+// GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) {
+ if params == nil {
+ params = &ListObjectVersionsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, addOperationListObjectVersionsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListObjectVersionsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListObjectVersionsInput struct {
+
+ // The bucket name that contains the objects.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A delimiter is a character that you specify to group keys. All keys that contain
+ // the same string between the prefix and the first occurrence of the delimiter are
+ // grouped under a single result element in CommonPrefixes. These groups are
+ // counted as one result against the max-keys limitation. These keys are not
+ // returned elsewhere in the response.
+ Delimiter *string
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies the
+ // encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response.
+ EncodingType types.EncodingType
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string
+
+ // Sets the maximum number of keys returned in the response. By default the API
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more. If additional keys satisfy the search criteria, but were not
+ // returned because max-keys was exceeded, the response contains true. To return
+ // the additional keys, see key-marker and version-id-marker.
+ MaxKeys int32
+
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings of
+ // keys. (You can think of using prefix to make groups in the same way you'd use a
+ // folder in a file system.) You can use prefix with delimiter to roll up numerous
+ // objects into a single result under CommonPrefixes.
+ Prefix *string
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string
+}
+
+type ListObjectVersionsOutput struct {
+
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Container for an object that is a delete marker.
+ DeleteMarkers []types.DeleteMarkerEntry
+
+ // The delimiter grouping the included keys. A delimiter is a character that you
+ // specify to group keys. All keys that contain the same string between the prefix
+ // and the first occurrence of the delimiter are grouped under a single result
+ // element in CommonPrefixes. These groups are counted as one result against the
+ // max-keys limitation. These keys are not returned elsewhere in the response.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ // If you specify encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements: KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
+ EncodingType types.EncodingType
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria. If your results were truncated, you can make a
+ // follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the rest of
+ // the results.
+ IsTruncated bool
+
+ // Marks the last key returned in a truncated response.
+ KeyMarker *string
+
+ // Specifies the maximum number of objects to return.
+ MaxKeys int32
+
+ // The bucket name.
+ Name *string
+
+ // When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria. Use
+ // this value for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string
+
+ // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker
+ // specifies the first object version not returned that satisfies the search
+ // criteria. Use this value for the version-id-marker request parameter in a
+ // subsequent request.
+ NextVersionIdMarker *string
+
+ // Selects objects that start with the value supplied by this parameter.
+ Prefix *string
+
+ // Marks the last version of the key returned in a truncated response.
+ VersionIdMarker *string
+
+ // Container for version information.
+ Versions []types.ObjectVersion
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListObjectVersions",
+ }
+}
+
+// getListObjectVersionsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListObjectVersionsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectVersionsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectVersionsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
new file mode 100644
index 000000000..92e3baace
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
@@ -0,0 +1,274 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use the
+// request parameters as selection criteria to return a subset of the objects in a
+// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design
+// your application to parse the contents of the response and handle it
+// appropriately. This API has been revised. We recommend that you use the newer
+// version, ListObjectsV2
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), when
+// developing applications. For backward compatibility, Amazon S3 continues to
+// support ListObjects. The following operations are related to ListObjects:
+//
+// *
+// ListObjectsV2
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// *
+// GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// ListBuckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) {
+ // Allow callers to pass nil; substitute an empty input.
+ if params == nil {
+ params = &ListObjectsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, addOperationListObjectsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attach the operation metadata to the deserialized output before returning.
+ out := result.(*ListObjectsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// ListObjectsInput contains the request parameters for the ListObjects
+// operation.
+type ListObjectsInput struct {
+
+ // The name of the bucket containing the objects. When using this API with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies the
+ // encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response.
+ EncodingType types.EncodingType
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string
+
+ // Sets the maximum number of keys returned in the response. By default the API
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ MaxKeys int32
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string
+
+ // Confirms that the requester knows that she or he will be charged for the list
+ // objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ RequestPayer types.RequestPayer
+}
+
+// ListObjectsOutput contains the response fields for the ListObjects
+// operation.
+type ListObjectsOutput struct {
+
+ // All of the keys rolled up in a common prefix count as a single return when
+ // calculating the number of returns. A response can contain CommonPrefixes only if
+ // you specify a delimiter. CommonPrefixes contains all (if there are any) keys
+ // between Prefix and the next occurrence of the string specified by the delimiter.
+ // CommonPrefixes lists keys that act like subdirectories in the directory
+ // specified by Prefix. For example, if the prefix is notes/ and the delimiter is a
+ // slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of
+ // the keys that roll up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Metadata about each object returned.
+ Contents []types.Object
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element in the
+ // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
+ // the response. Each rolled-up result counts as only one return against the
+ // MaxKeys value.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType types.EncodingType
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
+ IsTruncated bool
+
+ // Indicates where in the bucket listing begins. Marker is included in the response
+ // if it was sent with the request.
+ Marker *string
+
+ // The maximum number of keys returned in the response body.
+ MaxKeys int32
+
+ // The bucket name.
+ Name *string
+
+ // When response is truncated (the IsTruncated element value in the response is
+ // true), you can use the key name in this field as marker in the subsequent
+ // request to get next set of objects. Amazon S3 lists objects in alphabetical
+ // order Note: This element is returned only if you have delimiter request
+ // parameter specified. If response does not include the NextMarker and it is
+ // truncated, you can use the value of the last Key in the response as the marker
+ // in the subsequent request to get the next set of object keys.
+ NextMarker *string
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+// addOperationListObjectsMiddlewares wires the ListObjects serializer and
+// deserializer plus the client's common middlewares (logging, request IDs,
+// endpoint resolution, SigV4 signing, retries, input validation, and the S3
+// endpoint customization) onto the middleware stack. It stops at the first
+// registration error.
+func addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListObjectsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListObjectsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// newServiceMetadataMiddleware_opListObjects returns the middleware that
+// registers the service and operation metadata (region, service ID, signing
+// name, operation name) for ListObjects requests.
+func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListObjects",
+ }
+}
+
+// getListObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListObjectsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+// addListObjectsUpdateEndpoint registers the S3 endpoint-customization
+// middleware for ListObjects, forwarding the client's path-style, accelerate,
+// dual-stack, and ARN-region options.
+func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
new file mode 100644
index 000000000..a9f17e9f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
@@ -0,0 +1,391 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use the
+// request parameters as selection criteria to return a subset of the objects in a
+// bucket. A 200 OK response can contain valid or invalid XML. Make sure to design
+// your application to parse the contents of the response and handle it
+// appropriately. To use this operation, you must have READ access to the bucket.
+// To use this operation in an AWS Identity and Access Management (IAM) policy, you
+// must have permissions to perform the s3:ListBucket action. The bucket owner has
+// this permission by default and can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). This
+// section describes the latest revision of the API. We recommend that you use this
+// revised API for application development. For backward compatibility, Amazon S3
+// continues to support the prior version of this API, ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). To get a
+// list of your buckets, see ListBuckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). The
+// following operations are related to ListObjectsV2:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+ // Allow callers to pass nil; substitute an empty input.
+ if params == nil {
+ params = &ListObjectsV2Input{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, addOperationListObjectsV2Middlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attach the operation metadata to the deserialized output before returning.
+ out := result.(*ListObjectsV2Output)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// ListObjectsV2Input contains the request parameters for the ListObjectsV2
+// operation.
+type ListObjectsV2Input struct {
+
+ // Bucket name to list. When using this API with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // ContinuationToken indicates Amazon S3 that the list is being continued on this
+ // bucket with a token. ContinuationToken is obfuscated and is not a real key.
+ ContinuationToken *string
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType types.EncodingType
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The owner field is not present in listV2 by default, if you want to return owner
+ // field with each key in the result then set the fetch owner field to true.
+ FetchOwner bool
+
+ // Sets the maximum number of keys returned in the response. By default the API
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ MaxKeys int32
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string
+
+ // Confirms that the requester knows that she or he will be charged for the list
+ // objects request in V2 style. Bucket owners need not specify this parameter in
+ // their requests.
+ RequestPayer types.RequestPayer
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string
+}
+
+// ListObjectsV2Output contains the response fields for the ListObjectsV2
+// operation.
+type ListObjectsV2Output struct {
+
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns. A response can contain CommonPrefixes only if
+ // you specify a delimiter. CommonPrefixes contains all (if there are any) keys
+ // between Prefix and the next occurrence of the string specified by a delimiter.
+ // CommonPrefixes lists keys that act like subdirectories in the directory
+ // specified by Prefix. For example, if the prefix is notes/ and the delimiter is a
+ // slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of
+ // the keys that roll up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Metadata about each object returned.
+ Contents []types.Object
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ ContinuationToken *string
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element in the
+ // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
+ // the response. Each rolled-up result counts as only one return against the
+ // MaxKeys value.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements: Delimiter, Prefix, Key, and StartAfter.
+ EncodingType types.EncodingType
+
+ // Set to false if all of the results were returned. Set to true if more keys are
+ // available to return. If the number of results exceeds that specified by MaxKeys,
+ // all of the results might not be returned.
+ IsTruncated bool
+
+ // KeyCount is the number of keys returned with this request. KeyCount will always
+ // be less than equals to MaxKeys field. Say you ask for 50 keys, your result will
+ // include less than equals 50 keys
+ KeyCount int32
+
+ // Sets the maximum number of keys returned in the response. By default the API
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ MaxKeys int32
+
+ // The bucket name. When using this API with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Name *string
+
+ // NextContinuationToken is sent when isTruncated is true, which means there are
+ // more keys in the bucket that can be listed. The next list requests to Amazon S3
+ // can be continued with this NextContinuationToken. NextContinuationToken is
+ // obfuscated and is not a real key
+ NextContinuationToken *string
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string
+
+ // If StartAfter was sent with the request, it is included in the response.
+ StartAfter *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+// addOperationListObjectsV2Middlewares wires the ListObjectsV2 serializer and
+// deserializer plus the client's common middlewares (logging, request IDs,
+// endpoint resolution, SigV4 signing, retries, input validation, and the S3
+// endpoint customization) onto the middleware stack. It stops at the first
+// registration error.
+func addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+ ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error)
+}
+
+// Compile-time assertion that *Client satisfies ListObjectsV2APIClient.
+var _ ListObjectsV2APIClient = (*Client)(nil)
+
+// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2
+type ListObjectsV2PaginatorOptions struct {
+ // Sets the maximum number of keys returned in the response. By default the API
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListObjectsV2Paginator is a paginator for ListObjectsV2
+type ListObjectsV2Paginator struct {
+ // options holds the resolved paginator options.
+ options ListObjectsV2PaginatorOptions
+ // client issues the ListObjectsV2 requests.
+ client ListObjectsV2APIClient
+ // params is the base request, copied for each page.
+ params *ListObjectsV2Input
+ // nextToken is the continuation token for the next page; nil once exhausted.
+ nextToken *string
+ // firstPage is true until the first page has been fetched.
+ firstPage bool
+}
+
+// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator
+func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator {
+ // Allow a nil params; paginate over an empty request.
+ if params == nil {
+ params = &ListObjectsV2Input{}
+ }
+
+ // Seed the page-size limit from the request's MaxKeys, if set.
+ options := ListObjectsV2PaginatorOptions{}
+ if params.MaxKeys != 0 {
+ options.Limit = params.MaxKeys
+ }
+
+ // Functional options may override the seeded defaults.
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListObjectsV2Paginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available. It
+// reports true before the first page is fetched, and thereafter while a
+// continuation token remains.
+func (p *ListObjectsV2Paginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next ListObjectsV2 page.
+func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ // Copy the base request so per-page mutations don't leak into p.params.
+ params := *p.params
+ params.ContinuationToken = p.nextToken
+
+ params.MaxKeys = p.options.Limit
+
+ result, err := p.client.ListObjectsV2(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ // Advance the token only while the service reports a truncated listing.
+ prevToken := p.nextToken
+ p.nextToken = nil
+ if result.IsTruncated {
+ p.nextToken = result.NextContinuationToken
+ }
+
+ // Optionally stop when the service echoes back the same token, which would
+ // otherwise loop forever.
+ if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// newServiceMetadataMiddleware_opListObjectsV2 returns the middleware that
+// registers the service and operation metadata (region, service ID, signing
+// name, operation name) for ListObjectsV2 requests.
+func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListObjectsV2",
+ }
+}
+
+// getListObjectsV2BucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListObjectsV2BucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectsV2Input)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+// addListObjectsV2UpdateEndpoint registers the S3 endpoint-customization
+// middleware for ListObjectsV2, forwarding the client's path-style,
+// accelerate, dual-stack, and ARN-region options.
+func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectsV2BucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
new file mode 100644
index 000000000..2632d4c48
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
@@ -0,0 +1,380 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Lists the parts that have been uploaded for a specific multipart upload. This
+// operation must include the upload ID, which you obtain by sending the initiate
+// multipart upload request (see CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)).
+// This request returns a maximum of 1,000 uploaded parts. The default number of
+// parts returned is 1,000 parts. You can restrict the number of parts returned by
+// specifying the max-parts request parameter. If your multipart upload consists of
+// more than 1,000 parts, the response returns an IsTruncated field with the value
+// of true, and a NextPartNumberMarker element. In subsequent ListParts requests
+// you can include the part-number-marker query string parameter and set its value
+// to the NextPartNumberMarker field value from the previous response. For more
+// information on multipart uploads, see Uploading Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
+// information on permissions required to use the multipart upload API, see
+// Multipart Upload API and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The
+// following operations are related to ListParts:
+//
+// * CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) {
+ if params == nil {
+ params = &ListPartsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, addOperationListPartsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListPartsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListPartsInput struct {
+
+ // The name of the bucket to which the parts are being uploaded. When using this
+ // API with an access point, you must direct requests to the access point hostname.
+ // The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // This member is required.
+ UploadId *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Sets the maximum number of parts to return.
+ MaxParts int32
+
+ // Specifies the part after which listing should begin. Only parts with higher part
+ // numbers will be listed.
+ PartNumberMarker *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+}
+
+type ListPartsOutput struct {
+
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object name
+ // in the request, then the response includes this header indicating when the
+ // initiated multipart upload will become eligible for abort operation. For more
+ // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle
+ // Policy
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
+ AbortDate *time.Time
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ AbortRuleId *string
+
+ // The name of the bucket to which the multipart upload was initiated.
+ Bucket *string
+
+ // Container element that identifies who initiated the multipart upload. If the
+ // initiator is an AWS account, this element provides the same information as the
+ // Owner element. If the initiator is an IAM User, this element provides the user
+ // ARN and display name.
+ Initiator *types.Initiator
+
+ // Indicates whether the returned list of parts is truncated. A true value
+ // indicates that the list was truncated. A list can be truncated if the number of
+ // parts exceeds the limit returned in the MaxParts element.
+ IsTruncated bool
+
+ // Object key for which the multipart upload was initiated.
+ Key *string
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the part-number-marker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // Container element that identifies the object owner, after the object is created.
+ // If multipart upload is initiated by an IAM user, this element provides the
+ // parent account ID and display name.
+ Owner *types.Owner
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the part-number-marker request parameter in a
+ // subsequent request.
+ PartNumberMarker *string
+
+ // Container for elements related to a particular part. A response can contain zero
+ // or more Part elements.
+ Parts []types.Part
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded
+ // object.
+ StorageClass types.StorageClass
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListPartsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListPartsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListPartsAPIClient is a client that implements the ListParts operation.
+type ListPartsAPIClient interface {
+ ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error)
+}
+
+var _ ListPartsAPIClient = (*Client)(nil)
+
+// ListPartsPaginatorOptions is the paginator options for ListParts
+type ListPartsPaginatorOptions struct {
+ // Sets the maximum number of parts to return.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListPartsPaginator is a paginator for ListParts
+type ListPartsPaginator struct {
+ options ListPartsPaginatorOptions
+ client ListPartsAPIClient
+ params *ListPartsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListPartsPaginator returns a new ListPartsPaginator
+func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator {
+ if params == nil {
+ params = &ListPartsInput{}
+ }
+
+ options := ListPartsPaginatorOptions{}
+ if params.MaxParts != 0 {
+ options.Limit = params.MaxParts
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListPartsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListPartsPaginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next ListParts page.
+func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.PartNumberMarker = p.nextToken
+
+ params.MaxParts = p.options.Limit
+
+ result, err := p.client.ListParts(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = nil
+ if result.IsTruncated {
+ p.nextToken = result.NextPartNumberMarker
+ }
+
+ if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListParts",
+ }
+}
+
+// getListPartsBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getListPartsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListPartsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListPartsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..38c7ab3e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
@@ -0,0 +1,193 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+// Acceleration is a bucket-level feature that enables you to perform faster data
+// transfers to Amazon S3. To use this operation, you must have permission to
+// perform the s3:PutAccelerateConfiguration action. The bucket owner has this
+// permission by default. The bucket owner can grant this permission to others. For
+// more information about permissions, see Permissions Related to Bucket
+// Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The
+// Transfer Acceleration state of a bucket can be set to one of the following two
+// values:
+//
+// * Enabled – Enables accelerated data transfers to the bucket.
+//
+// *
+// Suspended – Disables accelerated data transfers to the bucket.
+//
+// The
+// GetBucketAccelerateConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
+// operation returns the transfer acceleration state of a bucket. After setting the
+// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty
+// minutes before the data transfer rates to the bucket increase. The name of the
+// bucket used for Transfer Acceleration must be DNS-compliant and must not contain
+// periods ("."). For more information about transfer acceleration, see Transfer
+// Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+// The following operations are related to PutBucketAccelerateConfiguration:
+//
+// *
+// GetBucketAccelerateConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
+//
+// *
+// CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketAccelerateConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, addOperationPutBucketAccelerateConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAccelerateConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketAccelerateConfigurationInput struct {
+
+ // Container for setting the transfer acceleration state.
+ //
+ // This member is required.
+ AccelerateConfiguration *types.AccelerateConfiguration
+
+ // The name of the bucket for which the accelerate configuration is set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketAccelerateConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketAccelerateConfiguration",
+ }
+}
+
+// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAccelerateConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
new file mode 100644
index 000000000..ac9812770
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
@@ -0,0 +1,314 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the permissions on an existing bucket using access control lists (ACL). For
+// more information, see Using ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To set
+// the ACL of a bucket, you must have WRITE_ACP permission. You can use one of the
+// following two ways to set a bucket's permissions:
+//
+// * Specify the ACL in the
+// request body
+//
+// * Specify permissions using request headers
+//
+// You cannot specify
+// access permission using both the body and the request headers. Depending on your
+// application needs, you may choose to set the ACL on a bucket using either the
+// request body or the headers. For example, if you have an existing application
+// that updates a bucket ACL using the request body, then you can continue to use
+// that approach. Access Permissions You can set access permissions using one of
+// the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request
+// header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each
+// canned ACL has a predefined set of grantees and permissions. Specify the canned
+// ACL name as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information, see
+// Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly with the x-amz-grant-read,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. When using these headers, you specify explicit access permissions and
+// grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
+// you use these ACL-specific headers, you cannot use the x-amz-acl header to set a
+// canned ACL. These parameters map to the set of permissions that Amazon S3
+// supports in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify
+// each grantee as a type=value pair, where the type is one of the following:
+//
+// * id
+// – if the value specified is the canonical user ID of an AWS account
+//
+// * uri – if
+// you are granting permissions to a predefined group
+//
+// * emailAddress – if the
+// value specified is the email address of an AWS account Using email addresses to
+// specify a grantee is only supported in the following AWS Regions:
+//
+// * US East (N.
+// Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific
+// (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe
+// (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3
+// supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// For example, the following x-amz-grant-write header grants
+// create, overwrite, and delete objects permission to LogDelivery group predefined
+// by Amazon S3 and two AWS accounts identified by their email addresses.
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify
+// access permissions explicitly. You cannot do both. Grantee Values You can
+// specify the person (grantee) to whom you're assigning access rights (using
+// request elements) in the following ways:
+//
+// * By the person's ID:
+// <>ID<><>GranteesEmail<> DisplayName is optional and ignored in the request
+//
+// *
+// By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// * By
+// Email address: <>Grantees@email.com<> The grantee is resolved to the
+// CanonicalUser and, in a response to a GET Object acl request, appears as the
+// CanonicalUser. Using email addresses to specify a grantee is only supported in
+// the following AWS Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West (N.
+// California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific
+// (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São
+// Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// *
+// GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
+ if params == nil {
+ params = &PutBucketAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, addOperationPutBucketAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketAclInput struct {
+
+ // The bucket to which to apply the ACL.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The canned ACL to apply to the bucket.
+ ACL types.BucketCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to RFC 1864.
+ // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string
+}
+
+type PutBucketAclOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketAcl",
+ }
+}
+
+// getPutBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..337da2220
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
@@ -0,0 +1,225 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID). You can have up to 1,000 analytics configurations per bucket.
+// You can choose to have storage class analysis export analysis reports sent to a
+// comma-separated values (CSV) flat file. See the DataExport request element.
+// Reports are updated daily and are based on the object filters that you
+// configure. When selecting data export, you specify a destination bucket and an
+// optional destination prefix where the file is written. You can export the data
+// to a destination bucket in a different account. However, the destination bucket
+// must be in the same Region as the bucket that you are making the PUT analytics
+// configuration to. For more information, see Amazon S3 Analytics – Storage Class
+// Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+// You must create a bucket policy on the destination bucket where the exported
+// file is written to grant permissions to Amazon S3 to write objects to the
+// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory
+// and Storage Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
+// To use this operation, you must have permissions to perform the
+// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Special Errors
+//
+// * HTTP Error: HTTP 400 Bad Request
+//
+// * Code: InvalidArgument
+//
+// *
+// Cause: Invalid argument.
+//
+// * HTTP Error: HTTP 400 Bad Request
+//
+// * Code:
+// TooManyConfigurations
+//
+// * Cause: You are attempting to create a new configuration
+// but have already reached the 1,000-configuration limit.
+//
+// * HTTP Error: HTTP 403
+// Forbidden
+//
+// * Code: AccessDenied
+//
+// * Cause: You are not the owner of the specified
+// bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to
+// set the configuration on the bucket.
+//
+// Related Resources
+//
+// *
+// GetBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// *
+// DeleteBucketAnalyticsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// *
+// ListBucketAnalyticsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, addOperationPutBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketAnalyticsConfigurationInput struct {
+
+ // The configuration and any analyses for the analytics filter.
+ //
+ // This member is required.
+ AnalyticsConfiguration *types.AnalyticsConfiguration
+
+ // The name of the bucket to which an analytics configuration is stored.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketAnalyticsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketAnalyticsConfiguration",
+ }
+}
+
+// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member valueand a boolean indicating if the input has
+// a modeled bucket name,
+func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
new file mode 100644
index 000000000..f547945d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
@@ -0,0 +1,211 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the cors configuration for your bucket. If the configuration exists, Amazon
+// S3 replaces it. To use this operation, you must be allowed to perform the
+// s3:PutBucketCORS action. By default, the bucket owner has this permission and
+// can grant it to others. You set this configuration on a bucket so that the
+// bucket can service cross-origin requests. For example, you might want to enable
+// a request whose origin is http://www.example.com to access your Amazon S3 bucket
+// at my.example.bucket.com by using the browser's XMLHttpRequest capability. To
+// enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// subresource to the bucket. The cors subresource is an XML document in which you
+// configure rules that identify origins and the HTTP methods that can be executed
+// on your bucket. The document is limited to 64 KB in size. When Amazon S3
+// receives a cross-origin request (or a pre-flight OPTIONS request) against a
+// bucket, it evaluates the cors configuration on the bucket and uses the first
+// CORSRule rule that matches the incoming browser request to enable a cross-origin
+// request. For a rule to match, the following conditions must be met:
+//
+// * The
+// request's Origin header must match AllowedOrigin elements.
+//
+// * The request method
+// (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
+// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod
+// elements.
+//
+// * Every header specified in the Access-Control-Request-Headers
+// request header of a pre-flight request must match an AllowedHeader element.
+//
+// For
+// more information about CORS, go to Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple
+// Storage Service Developer Guide. Related Resources
+//
+// * GetBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
+//
+// *
+// DeleteBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+//
+// *
+// RESTOPTIONSobject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) {
+ if params == nil {
+ params = &PutBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, addOperationPutBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketCorsInput struct {
+
+ // Specifies the bucket impacted by the corsconfiguration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Describes the cross-origin access configuration for objects in an Amazon S3
+ // bucket. For more information, see Enabling Cross-Origin Resource Sharing
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple
+ // Storage Service Developer Guide.
+ //
+ // This member is required.
+ CORSConfiguration *types.CORSConfiguration
+
+ // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to RFC 1864.
+ // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketCorsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketCors",
+ }
+}
+
+// getPutBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getPutBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
new file mode 100644
index 000000000..5c9d2c580
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
@@ -0,0 +1,198 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation uses the encryption subresource to configure default encryption
+// and Amazon S3 Bucket Key for an existing bucket. Default encryption for a bucket
+// can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS
+// customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS,
+// you can also configure Amazon S3 Bucket Key. For information about default
+// encryption, see Amazon S3 default bucket encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the
+// Amazon Simple Storage Service Developer Guide. For more information about S3
+// Bucket Keys, see Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+// Simple Storage Service Developer Guide. This operation requires AWS Signature
+// Version 4. For more information, see Authenticating Requests (AWS Signature
+// Version 4). To use this operation, you must have permissions to perform the
+// s3:PutEncryptionConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. Related Resources
+//
+// *
+// GetBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
+//
+// *
+// DeleteBucketEncryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
+func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &PutBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, addOperationPutBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketEncryptionInput struct {
+
+ // Specifies default encryption for a bucket using server-side encryption with
+ // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
+ // (SSE-KMS). For information about the Amazon S3 default encryption feature, see
+ // Amazon S3 Default Bucket Encryption
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the default server-side-encryption configuration.
+ //
+ // This member is required.
+ ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration
+
+ // The base64-encoded 128-bit MD5 digest of the server-side encryption
+ // configuration. For requests made using the AWS Command Line Interface (CLI) or
+ // AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketEncryptionOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketEncryption",
+ }
+}
+
+// getPutBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..fb8984b91
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,185 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Puts a S3 Intelligent-Tiering configuration to the specified bucket. The S3
+// Intelligent-Tiering storage class is designed to optimize storage costs by
+// automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead. S3 Intelligent-Tiering delivers
+// automatic cost savings by moving data between access tiers, when access patterns
+// change. The S3 Intelligent-Tiering storage class is suitable for objects larger
+// than 128 KB that you plan to store for at least 30 days. If the size of an
+// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
+// can be stored, but they are always charged at the frequent access tier rates in
+// the S3 Intelligent-Tiering storage class. If you delete an object before the end
+// of the 30-day minimum storage duration period, you are charged for 30 days. For
+// more information, see Storage class for automatically optimizing frequently and
+// infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+// Operations related to PutBucketIntelligentTieringConfiguration include:
+//
+// *
+// DeleteBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+//
+// *
+// GetBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+//
+// *
+// ListBucketIntelligentTieringConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, addOperationPutBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketIntelligentTieringConfigurationInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Container for S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ IntelligentTieringConfiguration *types.IntelligentTieringConfiguration
+}
+
+type PutBucketIntelligentTieringConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member valueand a boolean indicating if the
+// input has a modeled bucket name,
+func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
new file mode 100644
index 000000000..2fd12d8c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
@@ -0,0 +1,225 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the PUT operation adds an inventory configuration
+// (identified by the inventory ID) to the bucket. You can have up to 1,000
+// inventory configurations per bucket. Amazon S3 inventory generates inventories
+// of the objects in the bucket on a daily or weekly basis, and the results are
+// published to a flat file. The bucket that is inventoried is called the source
+// bucket, and the bucket where the inventory flat file is stored is called the
+// destination bucket. The destination bucket must be in the same AWS Region as the
+// source bucket. When you configure an inventory for a source bucket, you specify
+// the destination bucket where you want the inventory to be stored, and whether to
+// generate the inventory daily or weekly. You can also configure what object
+// metadata to include and whether to inventory all object versions or only current
+// versions. For more information, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) in the
+// Amazon Simple Storage Service Developer Guide. You must create a bucket policy
+// on the destination bucket to grant permissions to Amazon S3 to write objects to
+// the bucket in the defined location. For an example policy, see Granting
+// Permissions for Amazon S3 Inventory and Storage Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
+// To use this operation, you must have permissions to perform the
+// s3:PutInventoryConfiguration action. The bucket owner has this permission by
+// default and can grant this permission to others. For more information about
+// permissions, see Permissions Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. Special Errors
+//
+// * HTTP 400 Bad
+// Request Error
+//
+// * Code: InvalidArgument
+//
+// * Cause: Invalid Argument
+//
+// * HTTP 400
+// Bad Request Error
+//
+// * Code: TooManyConfigurations
+//
+// * Cause: You are attempting to
+// create a new configuration but have already reached the 1,000-configuration
+// limit.
+//
+// * HTTP 403 Forbidden Error
+//
+// * Code: AccessDenied
+//
+// * Cause: You are not
+// the owner of the specified bucket, or you do not have the
+// s3:PutInventoryConfiguration bucket permission to set the configuration on the
+// bucket.
+//
+// Related Resources
+//
+// * GetBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// *
+// DeleteBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// *
+// ListBucketInventoryConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, addOperationPutBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket where the inventory configuration will be stored.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the inventory configuration.
+ //
+ // This member is required.
+ InventoryConfiguration *types.InventoryConfiguration
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketInventoryConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketInventoryConfiguration",
+ }
+}
+
+// getPutBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
new file mode 100644
index 000000000..dd554732f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
@@ -0,0 +1,223 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). Bucket
+// lifecycle configuration now supports specifying a lifecycle rule using an object
+// key name prefix, one or more object tags, or a combination of both. Accordingly,
+// this section describes the latest API. The previous version of the API supported
+// filtering based only on an object key name prefix, which is supported for
+// backward compatibility. For the related API description, see PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
+// Rules You specify the lifecycle configuration in your request body. The
+// lifecycle configuration is specified as XML consisting of one or more rules.
+// Each rule consists of the following:
+//
+// * Filter identifying a subset of objects
+// to which the rule applies. The filter can be based on a key name prefix, object
+// tags, or a combination of both.
+//
+// * Status whether the rule is in effect.
+//
+// * One
+// or more lifecycle transition and expiration actions that you want Amazon S3 to
+// perform on the objects identified by the filter. If the state of your bucket is
+// versioning-enabled or versioning-suspended, you can have many versions of the
+// same object (one current version and zero or more noncurrent versions). Amazon
+// S3 provides predefined actions that you can specify for current and noncurrent
+// object versions.
+//
+// For more information, see Object Lifecycle Management
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) and
+// Lifecycle Configuration Elements
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
+// Permissions By default, all Amazon S3 resources are private, including buckets,
+// objects, and related subresources (for example, lifecycle configuration and
+// website configuration). Only the resource owner (that is, the AWS account that
+// created it) can access the resource. The resource owner can optionally grant
+// access permissions to others by writing an access policy. For this operation, a
+// user must get the s3:PutLifecycleConfiguration permission. You can also
+// explicitly deny permissions. Explicit deny also supersedes any other
+// permissions. If you want to block users or accounts from removing or deleting
+// objects from your bucket, you must deny them permissions for the following
+// actions:
+//
+// * s3:DeleteObject
+//
+// * s3:DeleteObjectVersion
+//
+// *
+// s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see
+// Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The
+// following are related to PutBucketLifecycleConfiguration:
+//
+// * Examples of
+// Lifecycle Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
+//
+// *
+// GetBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// *
+// DeleteBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, addOperationPutBucketLifecycleConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketLifecycleConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+
+ // The name of the bucket for which to set the configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Container for lifecycle rules. You can add as many as 1,000 rules.
+ LifecycleConfiguration *types.BucketLifecycleConfiguration
+}
+
+type PutBucketLifecycleConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketLifecycleConfiguration",
+ }
+}
+
+// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketLifecycleConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
new file mode 100644
index 000000000..b9fc986ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
@@ -0,0 +1,210 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Set the logging parameters for a bucket and to specify permissions for who can
+// view and modify the logging parameters. All logs are saved to buckets in the
+// same AWS Region as the source bucket. To set the logging status of a bucket, you
+// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL
+// to all logs. You use the Grantee request element to grant access to other
+// people. The Permissions request element specifies the kind of access the grantee
+// has to the logs. Grantee Values You can specify the person (grantee) to whom
+// you're assigning access rights (using request elements) in the following
+// ways:
+//
+// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional
+// and ignored in the request.
+//
+// * By Email address: <>Grantees@email.com<> The
+// grantee is resolved to the CanonicalUser and, in a response to a GET Object acl
+// request, appears as the CanonicalUser.
+//
+// * By URI:
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// To enable
+// logging, you use LoggingEnabled and its children request elements. To disable
+// logging, you use an empty BucketLoggingStatus request element: For more
+// information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). For more
+// information about creating a bucket, see CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). For
+// more information about returning the logging status of a bucket, see
+// GetBucketLogging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). The
+// following operations are related to PutBucketLogging:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// *
+// CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// GetBucketLogging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) {
+ if params == nil {
+ params = &PutBucketLoggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, addOperationPutBucketLoggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketLoggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketLoggingInput struct {
+
+ // The name of the bucket for which to set the logging parameters.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for logging status information.
+ //
+ // This member is required.
+ BucketLoggingStatus *types.BucketLoggingStatus
+
+ // The MD5 hash of the PutBucketLogging request body. For requests made using the
+ // AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated
+ // automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketLoggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketLogging",
+ }
+}
+
+// getPutBucketLoggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketLoggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketLoggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
new file mode 100644
index 000000000..ea997182c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets a metrics configuration (specified by the metrics configuration ID) for the
+// bucket. You can have up to 1,000 metrics configurations per bucket. If you're
+// updating an existing metrics configuration, note that this is a full replacement
+// of the existing metrics configuration. If you don't include the elements you
+// want to keep, they are erased. To use this operation, you must have permissions
+// to perform the s3:PutMetricsConfiguration action. The bucket owner has this
+// permission by default. The bucket owner can grant this permission to others. For
+// more information about permissions, see Permissions Related to Bucket
+// Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). For
+// information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+// The following operations are related to PutBucketMetricsConfiguration:
+//
+// *
+// DeleteBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// *
+// PutBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// *
+// ListBucketMetricsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// GetBucketLifecycle
+// has the following special error:
+//
+// * Error code: TooManyConfigurations
+//
+// *
+// Description: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// * HTTP Status Code: HTTP 400 Bad Request
+func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, addOperationPutBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket for which the metrics configuration is set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the metrics configuration.
+ //
+ // This member is required.
+ MetricsConfiguration *types.MetricsConfiguration
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketMetricsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketMetricsConfiguration",
+ }
+}
+
+// getPutBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
new file mode 100644
index 000000000..f63982222
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
@@ -0,0 +1,192 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). Using
+// this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an event
+// notification when it detects an event of the specified type. By default, your
+// bucket has no event notifications configured. That is, the notification
+// configuration will be an empty NotificationConfiguration. This operation
+// replaces the existing notification configuration with the configuration you
+// include in the request body. After Amazon S3 receives this request, it first
+// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon
+// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner
+// has permission to publish to it by sending a test notification. In the case of
+// AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions
+// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
+// more information, see Configuring Notifications for Amazon S3 Events
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). You
+// can disable notifications by adding the empty NotificationConfiguration element.
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with s3:PutBucketNotification permission. The
+// PUT notification is an atomic operation. For example, suppose your notification
+// configuration includes SNS topic, SQS queue, and Lambda function configurations.
+// When you send a PUT request with this configuration, Amazon S3 sends test
+// messages to your SNS topic. If the message fails, the entire PUT operation will
+// fail, and Amazon S3 will not add the configuration to your bucket. Responses If
+// the configuration in the request body includes only one TopicConfiguration
+// specifying only the s3:ReducedRedundancyLostObject event type, the response will
+// also include the x-amz-sns-test-message-id header containing the message ID of
+// the test notification sent to the topic. The following operation is related to
+// PutBucketNotificationConfiguration:
+//
+// * GetBucketNotificationConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketNotificationConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, addOperationPutBucketNotificationConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketNotificationConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketNotificationConfigurationInput struct {
+
+ // The name of the bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A container for specifying the notification configuration of the bucket. If this
+ // element is empty, notifications are turned off for the bucket.
+ //
+ // This member is required.
+ NotificationConfiguration *types.NotificationConfiguration
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketNotificationConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketNotificationConfiguration",
+ }
+}
+
+// getPutBucketNotificationConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketNotificationConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
new file mode 100644
index 000000000..90132d8a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
@@ -0,0 +1,177 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
+// operation, you must have the s3:PutBucketOwnershipControls permission. For more
+// information about Amazon S3 permissions, see Specifying Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// For information about Amazon S3 Object Ownership, see Using Object Ownership
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html).
+// The following operations are related to PutBucketOwnershipControls:
+//
+// *
+// GetBucketOwnershipControls
+//
+// * DeleteBucketOwnershipControls
+func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &PutBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, addOperationPutBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketOwnershipControlsInput struct {
+
+ // The name of the Amazon S3 bucket whose OwnershipControls you want to set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want to
+ // apply to this Amazon S3 bucket.
+ //
+ // This member is required.
+ OwnershipControls *types.OwnershipControls
+
+ // The MD5 hash of the OwnershipControls request body. For requests made using the
+ // AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated
+ // automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketOwnershipControlsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketOwnershipControls",
+ }
+}
+
+// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting
+// a provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
new file mode 100644
index 000000000..aadcfe841
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
@@ -0,0 +1,186 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an
+// identity other than the root user of the AWS account that owns the bucket, the
+// calling identity must have the PutBucketPolicy permissions on the specified
+// bucket and belong to the bucket owner's account in order to use this operation.
+// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error. As a security precaution, the root user of the AWS
+// account that owns a bucket can always use this operation, even if the policy
+// explicitly denies the root user the ability to perform this action. For more
+// information about bucket policies, see Using Bucket Policies and User Policies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The
+// following operations are related to PutBucketPolicy:
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) {
+ if params == nil {
+ params = &PutBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, addOperationPutBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketPolicyInput struct {
+
+ // The name of the bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The bucket policy as a JSON document.
+ //
+ // This member is required.
+ Policy *string
+
+ // Set this parameter to true to confirm that you want to remove your permissions
+ // to change this bucket policy in the future.
+ ConfirmRemoveSelfBucketAccess bool
+
+ // The MD5 hash of the request body. For requests made using the AWS Command Line
+ // Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketPolicyOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketPolicy",
+ }
+}
+
+// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
new file mode 100644
index 000000000..a44af9b83
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
@@ -0,0 +1,221 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a replication configuration or replaces an existing one. For more
+// information, see Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+// S3 Developer Guide. To perform this operation, the user or role performing the
+// operation must have the iam:PassRole
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html)
+// permission. Specify the replication configuration in the request body. In the
+// replication configuration, you provide the name of the destination bucket or
+// buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon
+// S3 can assume to replicate objects on your behalf, and other relevant
+// information. A replication configuration must include at least one rule, and can
+// contain a maximum of 1,000. Each rule identifies a subset of objects to
+// replicate by filtering the objects in the source bucket. To choose additional
+// subsets of objects to replicate, add a rule for each subset. To specify a subset
+// of the objects in the source bucket to apply a replication rule to, add the
+// Filter element as a child of the Rule element. You can filter objects based on
+// an object key prefix, one or more object tags, or both. When you add the Filter
+// element in the configuration, you must also add the following elements:
+// DeleteMarkerReplication, Status, and Priority. If you are using an earlier
+// version of the replication configuration, Amazon S3 handles replication of
+// delete markers differently. For more information, see Backward Compatibility
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+// For information about enabling versioning on a bucket, see Using Versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). By default, a
+// resource owner, in this case the AWS account that created the bucket, can
+// perform this operation. The resource owner can also grant others permissions to
+// perform the operation. For more information about permissions, see Specifying
+// Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and
+// Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't
+// replicate objects that are stored at rest using server-side encryption with CMKs
+// stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following:
+// SourceSelectionCriteria, SseKmsEncryptedObjects, Status,
+// EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication
+// configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS
+// KMS
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html).
+// For information on PutBucketReplication errors, see List of replication-related
+// error codes
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+// The following operations are related to PutBucketReplication:
+//
+// *
+// GetBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+//
+// *
+// DeleteBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) {
+ if params == nil {
+ params = &PutBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, addOperationPutBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketReplicationInput struct {
+
+ // The name of the bucket
+ //
+ // This member is required.
+ Bucket *string
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ //
+ // This member is required.
+ ReplicationConfiguration *types.ReplicationConfiguration
+
+ // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string
+}
+
+type PutBucketReplicationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketReplication",
+ }
+}
+
+// getPutBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
new file mode 100644
index 000000000..b486030c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
@@ -0,0 +1,179 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download will
+// be charged for the download. For more information, see Requester Pays Buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The
+// following operations are related to PutBucketRequestPayment:
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// GetBucketRequestPayment
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html)
+func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) {
+ if params == nil {
+ params = &PutBucketRequestPaymentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, addOperationPutBucketRequestPaymentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketRequestPaymentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketRequestPaymentInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for Payer.
+ //
+ // This member is required.
+ RequestPaymentConfiguration *types.RequestPaymentConfiguration
+
+ // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketRequestPaymentOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketRequestPayment",
+ }
+}
+
+// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketRequestPaymentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketRequestPaymentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
new file mode 100644
index 000000000..33312209b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
@@ -0,0 +1,223 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the tags for a bucket. Use tags to organize your AWS bill to reflect your
+// own cost structure. To do this, sign up to get your AWS account bill with tag
+// key values included. Then, to see the cost of combined resources, organize your
+// billing information according to resources with the same tag key values. For
+// example, you can tag several resources with a specific application name, and
+// then organize your billing information to see the total cost of that application
+// across several services. For more information, see Cost Allocation and Tagging
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
+// Within a bucket, if you add a tag that has the same key as an existing tag, the
+// new value overwrites the old value. For more information, see Using Cost
+// Allocation in Amazon S3 Bucket Tags
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). To use
+// this operation, you must have permissions to perform the s3:PutBucketTagging
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// PutBucketTagging has the following special errors:
+//
+// * Error code:
+// InvalidTagError
+//
+// * Description: The tag provided was not a valid tag. This error
+// can occur if the tag did not pass input validation. For information about tag
+// restrictions, see User-Defined Tag Restrictions
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
+// and AWS-Generated Cost Allocation Tag Restrictions
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
+//
+// *
+// Error code: MalformedXMLError
+//
+// * Description: The XML provided does not match
+// the schema.
+//
+// * Error code: OperationAbortedError
+//
+// * Description: A conflicting
+// conditional operation is currently in progress against this resource. Please try
+// again.
+//
+// * Error code: InternalError
+//
+// * Description: The service was unable to
+// apply the provided tag to the bucket.
+//
+// The following operations are related to
+// PutBucketTagging:
+//
+// * GetBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// *
+// DeleteBucketTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) {
+ if params == nil {
+ params = &PutBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, addOperationPutBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketTaggingInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the TagSet and Tag elements.
+ //
+ // This member is required.
+ Tagging *types.Tagging
+
+ // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketTaggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketTagging",
+ }
+}
+
+// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
new file mode 100644
index 000000000..248d37928
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
@@ -0,0 +1,201 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner. You can set the versioning state with one of the
+// following values: Enabled—Enables versioning for the objects in the bucket. All
+// objects added to the bucket receive a unique version ID. Suspended—Disables
+// versioning for the objects in the bucket. All objects added to the bucket
+// receive the version ID null. If the versioning state has never been set on a
+// bucket, it has no versioning state; a GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+// request does not return a versioning state value. If the bucket owner enables
+// MFA Delete in the bucket versioning configuration, the bucket owner must include
+// the x-amz-mfa request header and the Status and the MfaDelete request elements
+// in a request to set the versioning state of the bucket. If you have an object
+// expiration lifecycle policy in your non-versioned bucket and you want to
+// maintain the same permanent delete behavior when you enable versioning, you must
+// add a noncurrent expiration policy. The noncurrent expiration lifecycle policy
+// will manage the deletes of the noncurrent object versions in the version-enabled
+// bucket. (A version-enabled bucket maintains one current and zero or more
+// noncurrent object versions.) For more information, see Lifecycle and Versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config).
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// *
+// GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) {
+ if params == nil {
+ params = &PutBucketVersioningInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, addOperationPutBucketVersioningMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketVersioningOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketVersioningInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for setting the versioning state.
+ //
+ // This member is required.
+ VersioningConfiguration *types.VersioningConfiguration
+
+ // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The concatenation of the authentication device's serial number, a space, and the
+ // value that is displayed on your authentication device.
+ MFA *string
+}
+
+type PutBucketVersioningOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketVersioning",
+ }
+}
+
+// getPutBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketVersioningInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketVersioningBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
new file mode 100644
index 000000000..7b36f959d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -0,0 +1,237 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see Hosting
+// Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This PUT
+// operation requires the S3:PutBucketWebsite permission. By default, only the
+// bucket owner can configure the website attached to a bucket; however, bucket
+// owners can allow other users to set the website configuration by writing a
+// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect
+// all website requests sent to the bucket's website endpoint, you add a website
+// configuration with the following elements. Because all requests are sent to
+// another website, you don't need to provide index document name for the
+// bucket.
+//
+// * WebsiteConfiguration
+//
+// * RedirectAllRequestsTo
+//
+// * HostName
+//
+// *
+// Protocol
+//
+// If you want granular control over redirects, you can use the following
+// elements to add routing rules that describe conditions for redirecting requests
+// and information about the redirect destination. In this case, the website
+// configuration must provide an index document for the bucket, because some
+// requests might not be redirected.
+//
+// * WebsiteConfiguration
+//
+// * IndexDocument
+//
+// *
+// Suffix
+//
+// * ErrorDocument
+//
+// * Key
+//
+// * RoutingRules
+//
+// * RoutingRule
+//
+// * Condition
+//
+// *
+// HttpErrorCodeReturnedEquals
+//
+// * KeyPrefixEquals
+//
+// * Redirect
+//
+// * Protocol
+//
+// *
+// HostName
+//
+// * ReplaceKeyPrefixWith
+//
+// * ReplaceKeyWith
+//
+// * HttpRedirectCode
+//
+// Amazon
+// S3 has a limitation of 50 routing rules per website configuration. If you
+// require more than 50 routing rules, you can use object redirect. For more
+// information, see Configuring an Object Redirect
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in
+// the Amazon Simple Storage Service Developer Guide.
+func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &PutBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, addOperationPutBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketWebsiteInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the request.
+ //
+ // This member is required.
+ WebsiteConfiguration *types.WebsiteConfiguration
+
+ // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutBucketWebsiteOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketWebsite",
+ }
+}
+
+// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
new file mode 100644
index 000000000..28badfb40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
@@ -0,0 +1,477 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "time"
+)
+
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to add
+// an object to it. Amazon S3 never adds partial objects; if you receive a success
+// response, Amazon S3 added the entire object to the bucket. Amazon S3 is a
+// distributed system. If it receives multiple write requests for the same object
+// simultaneously, it overwrites all but the last object written. Amazon S3 does
+// not provide object locking; if you need this, make sure to build it into your
+// application layer or use versioning instead. To ensure that data is not
+// corrupted traversing the network, use the Content-MD5 header. When you use this
+// header, Amazon S3 checks the object against the provided MD5 value and, if they
+// do not match, returns an error. Additionally, you can calculate the MD5 while
+// putting an object to Amazon S3 and compare the returned ETag to the calculated
+// MD5 value. The Content-MD5 header is required for any request to upload an
+// object with a retention period configured using Amazon S3 Object Lock. For more
+// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in
+// the Amazon Simple Storage Service Developer Guide. Server-side Encryption You
+// can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers and
+// decrypts the data when you access it. You have the option to provide your own
+// encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more
+// information, see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+// If you request server-side encryption using AWS Key Management Service
+// (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more
+// information, see Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+// Simple Storage Service Developer Guide. Access Control List (ACL)-Specific
+// Request Headers You can use headers to grant ACL- based permissions. By default,
+// all objects are private. Only the owner has full access control. When adding a
+// new object, you can grant permissions to individual AWS accounts or to
+// predefined groups defined by Amazon S3. These permissions are then added to the
+// ACL on the object. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing
+// ACLs Using the REST API
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+// Storage Class Options By default, Amazon S3 uses the STANDARD Storage Class to
+// store newly created objects. The STANDARD storage class provides high durability
+// and high availability. Depending on performance needs, you can specify a
+// different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage
+// Class. For more information, see Storage Classes
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+// the Amazon S3 Service Developer Guide. Versioning If you enable versioning for a
+// bucket, Amazon S3 automatically generates a unique version ID for the object
+// being stored. Amazon S3 returns this ID in the response. When you enable
+// versioning for a bucket, if Amazon S3 receives multiple write requests for the
+// same object simultaneously, it stores all of the objects. For more information
+// about versioning, see Adding Objects to Versioning Enabled Buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
+// For information about returning the versioning state of a bucket, see
+// GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+// Related Resources
+//
+// * CopyObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// *
+// DeleteObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) {
+ if params == nil {
+ params = &PutObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, addOperationPutObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectInput struct {
+
+ // The bucket name to which the PUT operation was initiated. When using this API
+ // with an access point, you must direct requests to the access point hostname. The
+ // access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL types.ObjectCannedACL
+
+ // Object data.
+ Body io.Reader
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
+ // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
+ // Specifying this header with a PUT operation doesn’t affect bucket-level settings
+ // for S3 Bucket Key.
+ BucketKeyEnabled bool
+
+ // Can be used to specify caching behavior along the request/reply chain. For more
+ // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+ CacheControl *string
+
+ // Specifies presentational information for the object. For more information, see
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field. For more information, see
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically. For more information, see
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
+ ContentLength int64
+
+ // The base64-encoded 128-bit MD5 digest of the message (without the headers)
+ // according to RFC 1864. This header can be used as a message integrity check to
+ // verify that the data is the same data that was originally sent. Although it is
+ // optional, we recommend using the Content-MD5 mechanism as an end-to-end
+ // integrity check. For more information about REST request authentication, see
+ // REST Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+ ContentMD5 *string
+
+ // A standard MIME type describing the format of the contents. For more
+ // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
+ ContentType *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable. For more
+ // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
+ // action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL. This action is not supported by Amazon S3
+ // on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether a legal hold will be applied to this object. For more
+ // information about S3 Object Lock, see Object Lock
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that you want to apply to this object.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when you want this object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The value
+ // of this header is a base64-encoded UTF-8 string holding JSON with the encryption
+ // context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms, this
+ // header specifies the ID of the AWS Key Management Service (AWS KMS) symmetrical
+ // customer managed customer master key (CMK) that was used for the object. If the
+ // value of x-amz-server-side-encryption is aws:kms, this header specifies the ID
+ // of the symmetric customer managed AWS KMS CMK that will be used for the object.
+ // If you specify x-amz-server-side-encryption:aws:kms, but do not provide
+ // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK
+ // in AWS to protect the data.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
+ // more information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+ // the Amazon S3 Service Developer Guide.
+ StorageClass types.StorageClass
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). In the
+ // following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket: x-amz-website-redirect-location:
+ // /anotherPage.html In the following example, the request header sets the object
+ // redirect to another website: x-amz-website-redirect-location:
+ // http://www.example.com/ For more information about website hosting in Amazon S3,
+ // see Hosting Websites on Amazon S3
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and How to
+ // Configure Website Page Redirects
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ WebsiteRedirectLocation *string
+}
+
+type PutObjectOutput struct {
+
+ // Indicates whether the uploaded object uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If the expiration is configured for the object (see
+ // PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs that provide information about object expiration. The value of
+ // the rule-id is URL encoded.
+ Expiration *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the AWS KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms, this
+ // header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
+ // customer managed customer master key (CMK) that was used for the object.
+ SSEKMSKeyId *string
+
+ // If you specified server-side encryption either with an AWS KMS customer master
+ // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response
+ // includes this header. It confirms the encryption algorithm that Amazon S3 used
+ // to encrypt the object.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version of the object.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObject",
+ }
+}
+
+// getPutObjectBucketMember returns a pointer to string denoting a provided bucket
+// member valueand a boolean indicating if the input has a modeled bucket name,
+func getPutObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
+
+// PresignPutObject is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &PutObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns,
+ addOperationPutObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addPutObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
new file mode 100644
index 000000000..1ce14c110
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
@@ -0,0 +1,356 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Uses the acl subresource to set the access control list (ACL) permissions for a
+// new or existing object in an S3 bucket. You must have WRITE_ACP permission to
+// set the ACL of an object. For more information, see What permissions can I
+// grant?
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
+// in the Amazon Simple Storage Service Developer Guide. This action is not
+// supported by Amazon S3 on Outposts. Depending on your application needs, you can
+// choose to set the ACL on an object using either the request body or the headers.
+// For example, if you have an existing application that updates a bucket ACL using
+// the request body, you can continue to use that approach. For more information,
+// see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the
+// Amazon S3 Developer Guide. Access Permissions You can set access permissions
+// using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl
+// request header. Amazon S3 supports a set of predefined ACLs, known as canned
+// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify
+// the canned ACL name as the value of x-amz-acl. If you use this header, you
+// cannot use other access control-specific headers in your request. For more
+// information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly with the x-amz-grant-read,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. When using these headers, you specify explicit access permissions and
+// grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
+// you use these ACL-specific headers, you cannot use x-amz-acl header to set a
+// canned ACL. These parameters map to the set of permissions that Amazon S3
+// supports in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify
+// each grantee as a type=value pair, where the type is one of the following:
+//
+// * id
+// – if the value specified is the canonical user ID of an AWS account
+//
+// * uri – if
+// you are granting permissions to a predefined group
+//
+// * emailAddress – if the
+// value specified is the email address of an AWS account Using email addresses to
+// specify a grantee is only supported in the following AWS Regions:
+//
+// * US East (N.
+// Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific
+// (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe
+// (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3
+// supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// For example, the following x-amz-grant-read header grants
+// list objects permission to the two AWS accounts identified by their email
+// addresses. x-amz-grant-read: emailAddress="xyz@amazon.com",
+// emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access
+// permissions explicitly. You cannot do both. Grantee Values You can specify the
+// person (grantee) to whom you're assigning access rights (using request elements)
+// in the following ways:
+//
+// * By the person's ID: <>ID<><>GranteesEmail<>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI:
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// * By Email
+// address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the
+// CanonicalUser and, in a response to a GET Object acl request, appears as the
+// CanonicalUser. Using email addresses to specify a grantee is only supported in
+// the following AWS Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West (N.
+// California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific
+// (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São
+// Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+// General Reference.
+//
+// Versioning The ACL of an object is set at the object version
+// level. By default, PUT sets the ACL of the current version of an object. To set
+// the ACL of a different version, use the versionId subresource. Related
+// Resources
+//
+// * CopyObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// *
+// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) {
+ if params == nil {
+ params = &PutObjectAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, addOperationPutObjectAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectAclInput struct {
+
+ // The bucket name that contains the object to which you want to attach the ACL.
+ // When using this API with an access point, you must direct requests to the access
+ // point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key for which the PUT operation was initiated. When using this API with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ ACL types.ObjectCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to RFC 1864.>
+ // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command
+ // Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket. This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket. This action is not supported
+ // by Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL. This action is not supported by Amazon S3
+ // on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type PutObjectAclOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectAcl",
+ }
+}
+
+// getPutObjectAclBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getPutObjectAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
new file mode 100644
index 000000000..3a47adaba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
@@ -0,0 +1,196 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Applies a Legal Hold configuration to the specified object. This action is not
+// supported by Amazon S3 on Outposts. Related Resources
+//
+// * Locking Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) {
+ if params == nil {
+ params = &PutObjectLegalHoldInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, addOperationPutObjectLegalHoldMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectLegalHoldOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectLegalHoldInput struct {
+
+ // The bucket name containing the object that you want to place a Legal Hold on.
+ // When using this API with an access point, you must direct requests to the access
+ // point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object that you want to place a Legal Hold on.
+ //
+ // This member is required.
+ Key *string
+
+ // The MD5 hash for the request body. For requests made using the AWS Command Line
+ // Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Container element for the Legal Hold configuration you want to apply to the
+ // specified object.
+ LegalHold *types.ObjectLockLegalHold
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // The version ID of the object that you want to place a Legal Hold on.
+ VersionId *string
+}
+
+type PutObjectLegalHoldOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectLegalHold",
+ }
+}
+
+// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectLegalHoldInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectLegalHoldBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
new file mode 100644
index 000000000..44148171a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
@@ -0,0 +1,184 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Places an Object Lock configuration on the specified bucket. The rule specified
+// in the Object Lock configuration will be applied by default to every new object
+// placed in the specified bucket. DefaultRetention requires either Days or Years.
+// You can't specify both at the same time. Related Resources
+//
+// * Locking Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) {
+ if params == nil {
+ params = &PutObjectLockConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, addOperationPutObjectLockConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectLockConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectLockConfigurationInput struct {
+
+ // The bucket whose Object Lock configuration you want to create or replace.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The MD5 hash for the request body. For requests made using the AWS Command Line
+ // Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The Object Lock configuration that you want to apply to the specified bucket.
+ ObjectLockConfiguration *types.ObjectLockConfiguration
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string
+}
+
+type PutObjectLockConfigurationOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectLockConfiguration",
+ }
+}
+
+// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectLockConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectLockConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
new file mode 100644
index 000000000..c4f93ae81
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Places an Object Retention configuration on an object. This action is not
+// supported by Amazon S3 on Outposts. Related Resources
+//
+// * Locking Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) {
+ if params == nil {
+ params = &PutObjectRetentionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, addOperationPutObjectRetentionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectRetentionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectRetentionInput struct {
+
+ // The bucket name that contains the object you want to apply this Object Retention
+ // configuration to. When using this API with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object that you want to apply this Object Retention
+ // configuration to.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates whether this operation should bypass Governance-mode restrictions.
+ BypassGovernanceRetention bool
+
+ // The MD5 hash for the request body. For requests made using the AWS Command Line
+ // Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // The container element for the Object Retention configuration.
+ Retention *types.ObjectLockRetention
+
+ // The version ID for the object that you want to apply this Object Retention
+ // configuration to.
+ VersionId *string
+}
+
+type PutObjectRetentionOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectRetention",
+ }
+}
+
+// getPutObjectRetentionBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectRetentionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectRetentionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
new file mode 100644
index 000000000..b8eb54f98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
@@ -0,0 +1,236 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the supplied tag-set to an object that already exists in a bucket. A tag is
+// a key-value pair. You can associate tags with an object by sending a PUT request
+// against the tagging subresource that is associated with the object. You can
+// retrieve tags by sending a GET request. For more information, see
+// GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). For
+// tagging-related restrictions related to characters and encodings, see Tag
+// Restrictions
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. To
+// use this operation, you must have permission to perform the s3:PutObjectTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others. To put tags of any other version, use the versionId query
+// parameter. You also need permission for the s3:PutObjectVersionTagging action.
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). Special
+// Errors
+//
+// * Code: InvalidTagError
+//
+// * Cause: The tag provided was not a valid tag.
+// This error can occur if the tag did not pass input validation. For more
+// information, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// * Code:
+// MalformedXMLError
+//
+// * Cause: The XML provided does not match the schema.
+//
+// * Code:
+// OperationAbortedError
+//
+// * Cause: A conflicting conditional operation is currently
+// in progress against this resource. Please try again.
+//
+// * Code: InternalError
+//
+// *
+// Cause: The service was unable to apply the provided tag to the object.
+//
+// Related
+// Resources
+//
+// * GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) {
+ if params == nil {
+ params = &PutObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, addOperationPutObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectTaggingInput struct {
+
+ // The bucket name containing the object. When using this API with an access point,
+ // you must direct requests to the access point hostname. The access point hostname
+ // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Name of the object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Container for the TagSet and Tag elements
+ //
+ // This member is required.
+ Tagging *types.Tagging
+
+ // The MD5 hash for the request body. For requests made using the AWS Command Line
+ // Interface (CLI) or AWS SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The versionId of the object that the tag-set will be added to.
+ VersionId *string
+}
+
+type PutObjectTaggingOutput struct {
+
+ // The versionId of the object the tag-set was added to.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectTagging",
+ }
+}
+
+// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
new file mode 100644
index 000000000..b440b9f8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
@@ -0,0 +1,199 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.
+// To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see Specifying
+// Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
+// PublicAccessBlock configurations are different between the bucket and the
+// account, Amazon S3 uses the most restrictive combination of the bucket-level and
+// account-level settings. For more information about when Amazon S3 considers a
+// bucket or an object public, see The Meaning of "Public"
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+// Related Resources
+//
+// * GetPublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// *
+// DeletePublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+//
+// *
+// GetBucketPolicyStatus
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+//
+// *
+// Using Amazon S3 Block Public Access
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) {
+ if params == nil {
+ params = &PutPublicAccessBlockInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, addOperationPutPublicAccessBlockMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutPublicAccessBlockOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutPublicAccessBlockInput struct {
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want
+ // to set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The PublicAccessBlock configuration that you want to apply to this Amazon S3
+ // bucket. You can enable the configuration options in any combination. For more
+ // information about when Amazon S3 considers a bucket or object public, see The
+ // Meaning of "Public"
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration
+
+ // The MD5 hash of the PutPublicAccessBlock request body. For requests made using
+ // the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated
+ // automatically.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+}
+
+type PutPublicAccessBlockOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutPublicAccessBlock",
+ }
+}
+
+// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member valueand a boolean indicating if the input has a modeled
+// bucket name,
+func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutPublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
new file mode 100644
index 000000000..063b29964
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
@@ -0,0 +1,416 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Restores an archived copy of an object back into Amazon S3 This action is not
+// supported by Amazon S3 on Outposts. This action performs the following types of
+// requests:
+//
+// * select - Perform a select query on an archived object
+//
+// * restore an
+// archive - Restore an archived object
+//
+// To use this operation, you must have
+// permissions to perform the s3:RestoreObject action. The bucket owner has this
+// permission by default and can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the
+// Amazon Simple Storage Service Developer Guide. Querying Archives with Select
+// Requests You use a select type of request to perform SQL queries on archived
+// objects. The archived objects that are being queried by the select request must
+// be formatted as uncompressed comma-separated values (CSV) files. You can run
+// queries and custom analytics on your archived data without having to restore
+// your data to a hotter Amazon S3 tier. For an overview about select requests, see
+// Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon Simple Storage Service Developer Guide. When making a select
+// request, do the following:
+//
+// * Define an output location for the select query's
+// output. This must be an Amazon S3 bucket in the same AWS Region as the bucket
+// that contains the archive object that is being queried. The AWS account that
+// initiates the job must have permissions to write to the S3 bucket. You can
+// specify the storage class and encryption for the output objects stored in the
+// bucket. For more information about output, see Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon Simple Storage Service Developer Guide. For more information about
+// the S3 structure in the request body, see the following:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * Managing
+// Access with ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) in the
+// Amazon Simple Storage Service Developer Guide
+//
+// * Protecting Data Using
+// Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in
+// the Amazon Simple Storage Service Developer Guide
+//
+// * Define the SQL expression
+// for the SELECT type of restoration for your query in the request body's
+// SelectParameters structure. You can use expressions like the following
+// examples.
+//
+// * The following expression returns all records from the specified
+// object. SELECT * FROM Object
+//
+// * Assuming that you are not using any headers for
+// data stored in the object, you can specify columns with positional headers.
+// SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
+//
+// * If you have headers and you
+// set the fileHeaderInfo in the CSV structure in the request body to USE, you can
+// specify headers in the query. (If you set the fileHeaderInfo field to IGNORE,
+// the first row is skipped for the query.) You cannot mix ordinal positions with
+// header column names. SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
+//
+// For more
+// information about using SQL with S3 Glacier Select restore, see SQL Reference
+// for Amazon S3 Select and S3 Glacier Select
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide. When making a select
+// request, you can also do the following:
+//
+// * To expedite your queries, specify the
+// Expedited tier. For more information about tiers, see "Restoring Archives,"
+// later in this topic.
+//
+// * Specify details about the data serialization format of
+// both the input object that is being queried and the serialization of the
+// CSV-encoded query results.
+//
+// The following are additional important facts about
+// the select feature:
+//
+// * The output results are new Amazon S3 objects. Unlike
+// archive retrievals, they are stored until explicitly deleted-manually or through
+// a lifecycle policy.
+//
+// * You can issue more than one select request on the same
+// Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing
+// duplicate requests.
+//
+// * Amazon S3 accepts a select request even if the object has
+// already been restored. A select request doesn’t return error response
+// 409.
+//
+// Restoring objects Objects that you archive to the S3 Glacier or S3 Glacier
+// Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3
+// Intelligent-Tiering Deep Archive tiers are not accessible in real time. For
+// objects in Archive Access or Deep Archive Access tiers you must first initiate a
+// restore request, and then wait until the object is moved into the Frequent
+// Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage
+// classes you must first initiate a restore request, and then wait until a
+// temporary copy of the object is available. To access an archived object, you
+// must restore the object for the duration (number of days) that you specify. To
+// restore a specific object version, you can provide a version ID. If you don't
+// provide a version ID, Amazon S3 restores the current version. When restoring an
+// archived object (or using a select request), you can specify one of the
+// following data access tier options in the Tier element of the request body:
+//
+// *
+// Expedited - Expedited retrievals allow you to quickly access your data stored in
+// the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when
+// occasional urgent requests for a subset of archives are required. For all but
+// the largest archived objects (250 MB+), data accessed using Expedited retrievals
+// is typically made available within 1–5 minutes. Provisioned capacity ensures
+// that retrieval capacity for Expedited retrievals is available when you need it.
+// Expedited retrievals and provisioned capacity are not available for objects
+// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+// Deep Archive tier.
+//
+// * Standard - Standard retrievals allow you to access any of
+// your archived objects within several hours. This is the default option for
+// retrieval requests that do not specify the retrieval option. Standard retrievals
+// typically finish within 3–5 hours for objects stored in the S3 Glacier storage
+// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12
+// hours for objects stored in the S3 Glacier Deep Archive storage class or S3
+// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects
+// stored in S3 Intelligent-Tiering.
+//
+// * Bulk - Bulk retrievals are the lowest-cost
+// retrieval option in S3 Glacier, enabling you to retrieve large amounts, even
+// petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12
+// hours for objects stored in the S3 Glacier storage class or S3
+// Intelligent-Tiering Archive tier. They typically finish within 48 hours for
+// objects stored in the S3 Glacier Deep Archive storage class or S3
+// Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects
+// stored in S3 Intelligent-Tiering.
+//
+// For more information about archive retrieval
+// options and provisioned capacity for Expedited data access, see Restoring
+// Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the
+// Amazon Simple Storage Service Developer Guide. You can use Amazon S3 restore
+// speed upgrade to change the restore speed to a faster speed while it is in
+// progress. For more information, see Upgrading the speed of an in-progress
+// restore
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon Simple Storage Service Developer Guide. To get the status of
+// object restoration, you can send a HEAD request. Operations return the
+// x-amz-restore header, which provides information about the restoration status,
+// in the response. You can use Amazon S3 event notifications to notify you when a
+// restore is initiated or completed. For more information, see Configuring Amazon
+// S3 Event Notifications
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+// Amazon Simple Storage Service Developer Guide. After restoring an archived
+// object, you can update the restoration period by reissuing the request with a
+// new period. Amazon S3 updates the restoration period relative to the current
+// time and charges only for the request-there are no data transfer charges. You
+// cannot update the restoration period when Amazon S3 is actively processing your
+// current restore request for the object. If your bucket has a lifecycle
+// configuration with a rule that includes an expiration action, the object
+// expiration overrides the life span that you specify in a restore request. For
+// example, if you restore an object copy for 10 days, but the object is scheduled
+// to expire in 3 days, Amazon S3 deletes the object in 3 days. For more
+// information about lifecycle configuration, see PutBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in
+// Amazon Simple Storage Service Developer Guide. Responses A successful operation
+// returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object is not
+// previously restored, then Amazon S3 returns 202 Accepted in the response.
+//
+// * If
+// the object is previously restored, Amazon S3 returns 200 OK in the
+// response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress
+//
+// * Cause: Object
+// restore is already in progress. (This error does not apply to SELECT type
+// requests.)
+//
+// * HTTP Status Code: 409 Conflict
+//
+// * SOAP Fault Code Prefix:
+// Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable
+//
+// * Cause: expedited
+// retrievals are currently not available. Try again later. (Returned if there is
+// insufficient capacity to process the Expedited request. This error applies only
+// to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
+//
+// * HTTP
+// Status Code: 503
+//
+// * SOAP Fault Code Prefix: N/A
+//
+// Related Resources
+//
+// *
+// PutBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// *
+// GetBucketNotificationConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// *
+// SQL Reference for Amazon S3 Select and S3 Glacier Select
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide
+func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) {
+ if params == nil {
+ params = &RestoreObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, addOperationRestoreObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*RestoreObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type RestoreObjectInput struct {
+
+ // The bucket name containing the object to restore. When using this API with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the operation was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Container for restore job parameters.
+ RestoreRequest *types.RestoreRequest
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+}
+
+type RestoreObjectOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Indicates the path in the provided S3 output location where Select results will
+ // be restored to.
+ RestoreOutputPath *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpRestoreObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "RestoreObject",
+ }
+}
+
+// getRestoreObjectBucketMember returns a pointer to string denoting a provided
+// bucket member valueand a boolean indicating if the input has a modeled bucket
+// name,
+func getRestoreObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*RestoreObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getRestoreObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
new file mode 100644
index 000000000..bcff02c89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
@@ -0,0 +1,345 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// Uploads a part in a multipart upload. In this operation, you provide part data
+// in your request. However, you have an option to specify your existing Amazon S3
+// object as a data source for the part you are uploading. To upload a part from an
+// existing object, you use the UploadPartCopy
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation. You must initiate a multipart upload (see CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon S3
+// returns an upload ID, a unique identifier, that you must include in your upload
+// part request. Part numbers can be any number from 1 to 10,000, inclusive. A part
+// number uniquely identifies a part and also defines its position within the
+// object being created. If you upload a new part using the same part number that
+// was used with a previous part, the previously uploaded part is overwritten. Each
+// part must be at least 5 MB in size, except the last part. There is no size limit
+// on the last part of your multipart upload. To ensure that data is not corrupted
+// when traversing the network, specify the Content-MD5 header in the upload part
+// request. Amazon S3 checks the part data against the provided MD5 value. If they
+// do not match, Amazon S3 returns an error. If the upload request is signed with
+// Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a
+// checksum instead of Content-MD5. For more information see Authenticating
+// Requests: Using the Authorization Header (AWS Signature Version 4)
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+// Note: After you initiate multipart upload and upload one or more parts, you must
+// either complete or abort multipart upload in order to stop getting charged for
+// storage of the uploaded parts. Only after you either complete or abort multipart
+// upload, Amazon S3 frees up the parts storage and stops charging you for the
+// parts storage. For more information on multipart uploads, go to Multipart Upload
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+// the Amazon Simple Storage Service Developer Guide. For information on the
+// permissions required to use the multipart upload API, go to Multipart Upload API
+// and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon Simple Storage Service Developer Guide. You can optionally request
+// server-side encryption where Amazon S3 encrypts your data as it writes it to
+// disks in its data centers and decrypts it for you when you access it. You have
+// the option of providing your own encryption key, or you can use the AWS managed
+// encryption keys. If you choose to provide your own encryption key, the request
+// headers you provide in the request must match the headers you used in the
+// request to initiate the upload by using CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon Simple Storage Service Developer Guide. Server-side encryption is
+// supported by the S3 Multipart Upload actions. Unless you are using a
+// customer-provided encryption key, you don't need to specify the encryption
+// parameters in each UploadPart request. Instead, you only need to specify the
+// server-side encryption parameters in the initial Initiate Multipart request. For
+// more information, see CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// If you requested server-side encryption using a customer-provided encryption key
+// in your initiate multipart upload request, you must provide identical encryption
+// information in each part upload using the following headers.
+//
+// *
+// x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// Special Errors
+//
+// * Code:
+// NoSuchUpload
+//
+// * Cause: The specified multipart upload does not exist. The upload
+// ID might be invalid, or the multipart upload might have been aborted or
+// completed.
+//
+// * HTTP Status Code: 404 Not Found
+//
+// * SOAP Fault Code Prefix:
+// Client
+//
+// Related Resources
+//
+// * CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, addOperationUploadPartMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UploadPartOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UploadPartInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated. When using
+ // this API with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of part being uploaded. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber int32
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Object data.
+ Body io.Reader
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically.
+ ContentLength int64
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+}
+
+type UploadPartOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) was used for the object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUploadPartValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "UploadPart",
+ }
+}
+
+// getUploadPartBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getUploadPartBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
new file mode 100644
index 000000000..5cbdb601f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -0,0 +1,432 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Uploads a part by copying data from an existing object as data source. You
+// specify the data source by adding the request header x-amz-copy-source in your
+// request and a byte range by adding the request header x-amz-copy-source-range in
+// your request. The minimum allowable part size for a multipart upload is 5 MB.
+// For more information about multipart upload limits, go to Quick Facts
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) in the Amazon
+// Simple Storage Service Developer Guide. Instead of using an existing object as
+// part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation
+// and provide data in your request. You must initiate a multipart upload before
+// you can upload any part. In response to your initiate request. Amazon S3 returns
+// a unique identifier, the upload ID, that you must include in your upload part
+// request. For more information about using the UploadPartCopy operation, see the
+// following:
+//
+// * For conceptual information about multipart uploads, see Uploading
+// Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the
+// Amazon Simple Storage Service Developer Guide.
+//
+// * For information about
+// permissions required to use the multipart upload API, see Multipart Upload API
+// and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon Simple Storage Service Developer Guide.
+//
+// * For information about copying
+// objects using a single atomic operation vs. the multipart upload, see Operations
+// on Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the
+// Amazon Simple Storage Service Developer Guide.
+//
+// * For information about using
+// server-side encryption with customer-provided encryption keys with the
+// UploadPartCopy operation, see CopyObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the
+// following additional considerations about the request headers
+// x-amz-copy-source-if-match, x-amz-copy-source-if-none-match,
+// x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both of the
+// x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are
+// present in the request as follows: x-amz-copy-source-if-match condition
+// evaluates to true, and; x-amz-copy-source-if-unmodified-since condition
+// evaluates to false; Amazon S3 returns 200 OK and copies the data.
+//
+// *
+// Consideration 2 - If both of the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request as
+// follows: x-amz-copy-source-if-none-match condition evaluates to false, and;
+// x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3
+// returns 412 Precondition Failed response code.
+//
+// Versioning If your bucket has
+// versioning enabled, you could have multiple versions of the same object. By
+// default, x-amz-copy-source identifies the current version of the object to copy.
+// If the current version is a delete marker and you don't specify a versionId in
+// the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does
+// not exist. If you specify versionId in the x-amz-copy-source and the versionId
+// is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not
+// allowed to specify a delete marker as a version for the x-amz-copy-source. You
+// can optionally specify a specific version of the source object to copy by adding
+// the versionId subresource as shown in the following example: x-amz-copy-source:
+// /bucket/object?versionId=version id Special Errors
+//
+// * Code: NoSuchUpload
+//
+// *
+// Cause: The specified multipart upload does not exist. The upload ID might be
+// invalid, or the multipart upload might have been aborted or completed.
+//
+// * HTTP
+// Status Code: 404 Not Found
+//
+// * Code: InvalidRequest
+//
+// * Cause: The specified copy
+// source is not supported as a byte-range copy source.
+//
+// * HTTP Status Code: 400
+// Bad Request
+//
+// Related Resources
+//
+// * CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) {
+ if params == nil {
+ params = &UploadPartCopyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, addOperationUploadPartCopyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UploadPartCopyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UploadPartCopyInput struct {
+
+ // The bucket name. When using this API with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // operation with an access point through the AWS SDKs, you provide the access
+ // point ARN in place of the bucket name. For more information about access point
+ // ARNs, see Using Access Points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) in
+ // the Amazon Simple Storage Service Developer Guide. When using this API with
+ // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
+ // The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this operation using S3 on Outposts through the AWS SDKs, you provide the
+ // Outposts bucket ARN in place of the bucket name. For more information about S3
+ // on Outposts ARNs, see Using S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the source object for the copy operation. You specify the value in one
+ // of two formats, depending on whether you want to access the source object
+ // through an access point
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
+ //
+ // * For
+ // objects not accessed through an access point, specify the name of the source
+ // bucket and key of the source object, separated by a slash (/). For example, to
+ // copy the object reports/january.pdf from the bucket awsexamplebucket, use
+ // awsexamplebucket/reports/january.pdf. The value must be URL encoded.
+ //
+ // * For
+ // objects accessed through access points, specify the Amazon Resource Name (ARN)
+ // of the object as accessed through the access point, in the format
+ // arn:aws:s3:::accesspoint//object/. For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2, use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using access
+ // points only when the source and destination buckets are in the same AWS Region.
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012 in
+ // Region us-west-2, use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL encoded.
+ //
+ // To copy a specific version of an object, append
+ // ?versionId= to the value (for example,
+ // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of the
+ // source object.
+ //
+ // This member is required.
+ CopySource *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber int32
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // The range of bytes to copy from the source object. The range value must use the
+ // form bytes=first-last, where the first and last are the zero-based byte offsets
+ // to copy. For example, bytes=0-9 indicates that you want to copy the first 10
+ // bytes of the source. You can copy a range only if the source object is greater
+ // than 5 MB.
+ CopySourceRange *string
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account id of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request will fail with an HTTP 403
+ // (Access Denied) error.
+ ExpectedBucketOwner *string
+
+ // The account id of the expected source bucket owner. If the source bucket is
+ // owned by a different account, the request will fail with an HTTP 403 (Access
+ // Denied) error.
+ ExpectedSourceBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use to when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+}
+
+type UploadPartCopyOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with AWS KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Container for all response elements.
+ CopyPartResult *types.CopyPartResult
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "UploadPartCopy",
+ }
+}
+
+// getUploadPartCopyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getUploadPartCopyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartCopyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartCopyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseDualstack: options.UseDualstack,
+ UseARNRegion: options.UseARNRegion,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
new file mode 100644
index 000000000..cf4405df2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
@@ -0,0 +1,20105 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
+
+type awsRestxml_deserializeOpAbortMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata)
+ }
+ output := &AbortMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchUpload", errorCode):
+ return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpCompleteMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata)
+ }
+ output := &CompleteMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CompleteMultipartUploadOutput
+ if *v == nil {
+ sv = &CompleteMultipartUploadOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Location", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Location = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpCopyObject struct {
+}
+
+func (*awsRestxml_deserializeOpCopyObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata)
+ }
+ output := &CopyObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ObjectNotInActiveTierError", errorCode):
+ return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CopySourceVersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CopyObjectOutput
+ if *v == nil {
+ sv = &CopyObjectOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CopyObjectResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpCreateBucket struct {
+}
+
+func (*awsRestxml_deserializeOpCreateBucket) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata)
+ }
+ output := &CreateBucketOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("BucketAlreadyExists", errorCode):
+ return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody)
+
+ case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode):
+ return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("Location"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Location = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpCreateMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpCreateMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata)
+ }
+ output := &CreateMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.AbortDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AbortRuleId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CreateMultipartUploadOutput
+ if *v == nil {
+ sv = &CreateMultipartUploadOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpDeleteBucket struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucket) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata)
+ }
+ output := &DeleteBucketOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration maps a
+// non-2xx response for DeleteBucketAnalyticsConfiguration onto an error,
+// capturing the S3 host ID and request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketCors is the deserialize-step middleware
+// for the S3 DeleteBucketCors operation.
+type awsRestxml_deserializeOpDeleteBucketCors struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketCorsOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata)
+	}
+	output := &DeleteBucketCorsOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketCors maps a non-2xx response for
+// DeleteBucketCors onto an error, capturing the S3 host ID and request ID in
+// the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketEncryption is the deserialize-step
+// middleware for the S3 DeleteBucketEncryption operation.
+type awsRestxml_deserializeOpDeleteBucketEncryption struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketEncryptionOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata)
+	}
+	output := &DeleteBucketEncryptionOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketEncryption maps a non-2xx response
+// for DeleteBucketEncryption onto an error, capturing the S3 host ID and
+// request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration is the
+// deserialize-step middleware for the S3 DeleteBucketIntelligentTieringConfiguration operation.
+type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketIntelligentTieringConfigurationOutput
+// (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata)
+	}
+	output := &DeleteBucketIntelligentTieringConfigurationOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration maps
+// a non-2xx response for DeleteBucketIntelligentTieringConfiguration onto an
+// error, capturing the S3 host ID and request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketInventoryConfiguration is the
+// deserialize-step middleware for the S3 DeleteBucketInventoryConfiguration operation.
+type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketInventoryConfigurationOutput (2xx) or an
+// API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata)
+	}
+	output := &DeleteBucketInventoryConfigurationOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration maps a
+// non-2xx response for DeleteBucketInventoryConfiguration onto an error,
+// capturing the S3 host ID and request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketLifecycle is the deserialize-step
+// middleware for the S3 DeleteBucketLifecycle operation.
+type awsRestxml_deserializeOpDeleteBucketLifecycle struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketLifecycleOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata)
+	}
+	output := &DeleteBucketLifecycleOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketLifecycle maps a non-2xx response
+// for DeleteBucketLifecycle onto an error, capturing the S3 host ID and
+// request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketMetricsConfiguration is the
+// deserialize-step middleware for the S3 DeleteBucketMetricsConfiguration operation.
+type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketMetricsConfigurationOutput (2xx) or an
+// API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata)
+	}
+	output := &DeleteBucketMetricsConfigurationOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration maps a non-2xx
+// response for DeleteBucketMetricsConfiguration onto an error, capturing the
+// S3 host ID and request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketOwnershipControls is the deserialize-step
+// middleware for the S3 DeleteBucketOwnershipControls operation.
+type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketOwnershipControlsOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata)
+	}
+	output := &DeleteBucketOwnershipControlsOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls maps a non-2xx
+// response for DeleteBucketOwnershipControls onto an error, capturing the S3
+// host ID and request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketPolicy is the deserialize-step middleware
+// for the S3 DeleteBucketPolicy operation.
+type awsRestxml_deserializeOpDeleteBucketPolicy struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketPolicyOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata)
+	}
+	output := &DeleteBucketPolicyOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketPolicy maps a non-2xx response for
+// DeleteBucketPolicy onto an error, capturing the S3 host ID and request ID
+// in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketReplication is the deserialize-step
+// middleware for the S3 DeleteBucketReplication operation.
+type awsRestxml_deserializeOpDeleteBucketReplication struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketReplicationOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata)
+	}
+	output := &DeleteBucketReplicationOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketReplication maps a non-2xx response
+// for DeleteBucketReplication onto an error, capturing the S3 host ID and
+// request ID in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketTagging is the deserialize-step
+// middleware for the S3 DeleteBucketTagging operation.
+type awsRestxml_deserializeOpDeleteBucketTagging struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketTaggingOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata)
+	}
+	output := &DeleteBucketTaggingOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketTagging maps a non-2xx response for
+// DeleteBucketTagging onto an error, capturing the S3 host ID and request ID
+// in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteBucketWebsite is the deserialize-step
+// middleware for the S3 DeleteBucketWebsite operation.
+type awsRestxml_deserializeOpDeleteBucketWebsite struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into an empty DeleteBucketWebsiteOutput (2xx) or an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata)
+	}
+	output := &DeleteBucketWebsiteOutput{}
+	out.Result = output
+
+	// No modeled response payload: drain the body so the connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteBucketWebsite maps a non-2xx response for
+// DeleteBucketWebsite onto an error, capturing the S3 host ID and request ID
+// in the operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDeleteObject is the deserialize-step middleware for
+// the S3 DeleteObject operation.
+type awsRestxml_deserializeOpDeleteObject struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteObject) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and converts the raw HTTP
+// response into a DeleteObjectOutput populated from response headers (2xx) or
+// an API error (non-2xx).
+func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	// Delegate non-2xx responses to the operation's error deserializer.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata)
+	}
+	output := &DeleteObjectOutput{}
+	out.Result = output
+
+	// All modeled output members are bound to HTTP headers, not the body.
+	err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorDeleteObject maps a non-2xx response for
+// DeleteObject onto an error, capturing the S3 host ID and request ID in the
+// operation metadata when present.
+func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	// Buffer the full body so it can be decoded now and re-read later.
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so any code-specific deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	// No operation-specific error shapes are modeled; return a generic API error.
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput populates v from the
+// DeleteObject response's HTTP headers: x-amz-delete-marker (bool),
+// x-amz-request-charged, and x-amz-version-id.
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.DeleteMarker = vv
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.VersionId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpDeleteObjects is the deserialize-step middleware for
+// the S3 DeleteObjects operation.
+type awsRestxml_deserializeOpDeleteObjects struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpDeleteObjects) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize invokes the next handler and, on a 2xx response, decodes
+// response headers and the XML body into a DeleteObjectsOutput; non-2xx
+// responses are delegated to the operation's error deserializer.
+func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata)
+	}
+	output := &DeleteObjectsOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	// Tee the body through a ring buffer so a snapshot of the most recent
+	// bytes can be attached to any deserialization error for diagnostics.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	// An empty body (EOF before any root element) is a valid response.
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *DeleteObjectsOutput
+ if *v == nil {
+ sv = &DeleteObjectsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Deleted", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Error", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpDeleteObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata)
+ }
+ output := &DeleteObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpDeletePublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata)
+ }
+ output := &DeletePublicAccessBlockOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata)
+ }
+ output := &GetBucketAccelerateConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAccelerateConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketAccelerateConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.BucketAccelerateStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketAcl struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata)
+ }
+ output := &GetBucketAclOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAclOutput
+ if *v == nil {
+ sv = &GetBucketAclOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlList", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &GetBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAnalyticsConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketAnalyticsConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AnalyticsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketCors struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketCors) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata)
+ }
+ output := &GetBucketCorsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketCorsOutput
+ if *v == nil {
+ sv = &GetBucketCorsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CORSRule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketEncryption struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata)
+ }
+ output := &GetBucketEncryptionOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketEncryptionOutput
+ if *v == nil {
+ sv = &GetBucketEncryptionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata)
+ }
+ output := &GetBucketIntelligentTieringConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput
+// decodes the XML document into *v, allocating the output value when *v is nil
+// and skipping any element other than IntelligentTieringConfiguration.
+func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketIntelligentTieringConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketIntelligentTieringConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		// Shadow the decoder with one scoped to the current element, then
+		// restore it at the bottom of the loop.
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketInventoryConfiguration is the deserialize
+// middleware for the GetBucketInventoryConfiguration operation.
+type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body directly into output.InventoryConfiguration. Non-2xx
+// responses are routed to the operation's error deserializer; a 1 KiB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata)
+	}
+	output := &GetBucketInventoryConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration converts a
+// non-2xx response into an API error, recording host ID and request ID in the
+// metadata and falling back to a GenericAPIError when no specific error code
+// is modeled.
+func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput decodes
+// the XML document into *v, allocating the output value when *v is nil and
+// skipping any element other than InventoryConfiguration.
+func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketInventoryConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketInventoryConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketLifecycleConfiguration is the deserialize
+// middleware for the GetBucketLifecycleConfiguration operation.
+type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body into a GetBucketLifecycleConfigurationOutput via the
+// operation's document deserializer. Non-2xx responses are routed to the
+// operation's error deserializer; a 1 KiB ring buffer tees the body so a
+// snapshot can accompany any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata)
+	}
+	output := &GetBucketLifecycleConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration converts a
+// non-2xx response into an API error, recording host ID and request ID in the
+// metadata and falling back to a GenericAPIError when no specific error code
+// is modeled.
+func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput decodes
+// the XML document into *v, accumulating each unwrapped <Rule> element into
+// sv.Rules and skipping anything else.
+func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketLifecycleConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketLifecycleConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Rule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketLocation is the deserialize middleware for
+// the GetBucketLocation operation.
+type awsRestxml_deserializeOpGetBucketLocation struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketLocation) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body into a GetBucketLocationOutput via the operation's
+// document deserializer. Non-2xx responses are routed to the operation's error
+// deserializer; a 1 KiB ring buffer tees the body so a snapshot can accompany
+// any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+	}
+	output := &GetBucketLocationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketLocation converts a non-2xx response
+// into an API error, recording host ID and request ID in the metadata and
+// falling back to a GenericAPIError when no specific error code is modeled.
+func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketLocationOutput decodes the XML
+// document into *v, reading the <LocationConstraint> text value into
+// sv.LocationConstraint and skipping anything else.
+func awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketLocationOutput
+	if *v == nil {
+		sv = &GetBucketLocationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("LocationConstraint", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				// Empty element: leave the field at its zero value.
+				break
+			}
+			{
+				xtv := string(val)
+				sv.LocationConstraint = types.BucketLocationConstraint(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketLogging is the deserialize middleware for
+// the GetBucketLogging operation.
+type awsRestxml_deserializeOpGetBucketLogging struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketLogging) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body into a GetBucketLoggingOutput via the operation's document
+// deserializer. Non-2xx responses are routed to the operation's error
+// deserializer; a 1 KiB ring buffer tees the body so a snapshot can accompany
+// any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata)
+	}
+	output := &GetBucketLoggingOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketLogging converts a non-2xx response
+// into an API error, recording host ID and request ID in the metadata and
+// falling back to a GenericAPIError when no specific error code is modeled.
+func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketLoggingOutput decodes the XML
+// document into *v, delegating the <LoggingEnabled> element to its document
+// deserializer and skipping anything else.
+func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketLoggingOutput
+	if *v == nil {
+		sv = &GetBucketLoggingOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("LoggingEnabled", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketMetricsConfiguration is the deserialize
+// middleware for the GetBucketMetricsConfiguration operation.
+type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body directly into output.MetricsConfiguration. Non-2xx
+// responses are routed to the operation's error deserializer; a 1 KiB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata)
+	}
+	output := &GetBucketMetricsConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration converts a
+// non-2xx response into an API error, recording host ID and request ID in the
+// metadata and falling back to a GenericAPIError when no specific error code
+// is modeled.
+func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput decodes
+// the XML document into *v, delegating the <MetricsConfiguration> element to
+// its document deserializer and skipping anything else.
+func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketMetricsConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketMetricsConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("MetricsConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketNotificationConfiguration is the
+// deserialize middleware for the GetBucketNotificationConfiguration operation.
+type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body into a GetBucketNotificationConfigurationOutput via the
+// operation's document deserializer. Non-2xx responses are routed to the
+// operation's error deserializer; a 1 KiB ring buffer tees the body so a
+// snapshot can accompany any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata)
+	}
+	output := &GetBucketNotificationConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration converts a
+// non-2xx response into an API error, recording host ID and request ID in the
+// metadata and falling back to a GenericAPIError when no specific error code
+// is modeled.
+func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput
+// decodes the XML document into *v, accumulating unwrapped
+// <CloudFunctionConfiguration>, <QueueConfiguration>, and <TopicConfiguration>
+// elements into their respective list fields and skipping anything else. Note
+// the wire name "CloudFunctionConfiguration" maps to the
+// LambdaFunctionConfigurations field.
+func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketNotificationConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketNotificationConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("QueueConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("TopicConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketOwnershipControls is the deserialize
+// middleware for the GetBucketOwnershipControls operation.
+type awsRestxml_deserializeOpGetBucketOwnershipControls struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// XML response body directly into output.OwnershipControls. Non-2xx responses
+// are routed to the operation's error deserializer; a 1 KiB ring buffer tees
+// the body so a snapshot can accompany any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata)
+	}
+	output := &GetBucketOwnershipControlsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketOwnershipControls converts a non-2xx
+// response into an API error, recording host ID and request ID in the metadata
+// and falling back to a GenericAPIError when no specific error code is modeled.
+func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput decodes the
+// XML document into *v, delegating the <OwnershipControls> element to its
+// document deserializer and skipping anything else.
+func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketOwnershipControlsOutput
+	if *v == nil {
+		sv = &GetBucketOwnershipControlsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("OwnershipControls", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpGetBucketPolicy is the deserialize middleware for
+// the GetBucketPolicy operation.
+type awsRestxml_deserializeOpGetBucketPolicy struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the rest of the deserialize stack, then decodes the
+// response body into a GetBucketPolicyOutput via the operation's document
+// deserializer. Non-2xx responses are routed to the operation's error
+// deserializer; a 1 KiB ring buffer tees the body so a snapshot can accompany
+// any DeserializationError.
+func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata)
+	}
+	output := &GetBucketPolicyOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is a valid (empty) result.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetBucketPolicy converts a non-2xx response
+// into an API error, recording host ID and request ID in the metadata and
+// falling back to a GenericAPIError when no specific error code is modeled.
+func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v **GetBucketPolicyOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketPolicyOutput
+ if *v == nil {
+ sv = &GetBucketPolicyOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Policy", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Policy = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata)
+ }
+ output := &GetBucketPolicyStatusOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketPolicyStatusOutput
+ if *v == nil {
+ sv = &GetBucketPolicyStatusOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("PolicyStatus", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketReplication) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata)
+ }
+ output := &GetBucketReplicationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketReplicationOutput
+ if *v == nil {
+ sv = &GetBucketReplicationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicationConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata)
+ }
+ output := &GetBucketRequestPaymentOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketRequestPaymentOutput
+ if *v == nil {
+ sv = &GetBucketRequestPaymentOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Payer", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Payer = types.Payer(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata)
+ }
+ output := &GetBucketTaggingOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketTaggingOutput
+ if *v == nil {
+ sv = &GetBucketTaggingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TagSet", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata)
+ }
+ output := &GetBucketVersioningOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketVersioningOutput
+ if *v == nil {
+ sv = &GetBucketVersioningOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("MfaDelete", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.MFADelete = types.MFADeleteStatus(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.BucketVersioningStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata)
+ }
+ output := &GetBucketWebsiteOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketWebsiteOutput
+ if *v == nil {
+ sv = &GetBucketWebsiteOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ErrorDocument", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IndexDocument", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RoutingRules", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObject struct {
+}
+
+func (*awsRestxml_deserializeOpGetObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata)
+ }
+ output := &GetObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("InvalidObjectState", errorCode):
+ return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody)
+
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AcceptRanges = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CacheControl = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentDisposition = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentEncoding = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentLanguage = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 64)
+ if err != nil {
+ return err
+ }
+ v.ContentLength = vv
+ }
+
+ if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentRange = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentType = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.DeleteMarker = vv
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.Expires = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.LastModified = ptr.Time(t)
+ }
+
+ for headerKey, headerValues := range response.Header {
+ if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") {
+ if v.Metadata == nil {
+ v.Metadata = map[string]string{}
+ }
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0]
+ }
+ }
+
+ if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.MissingMeta = int32(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockMode = types.ObjectLockMode(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseDateTime(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.ObjectLockRetainUntilDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.PartsCount = int32(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ReplicationStatus = types.ReplicationStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Restore = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.StorageClass = types.StorageClass(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.TagCount = int32(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.WebsiteRedirectLocation = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization of nil %T", v)
+ }
+ v.Body = body
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectAcl struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata)
+ }
+ output := &GetObjectAclOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectAclOutput
+ if *v == nil {
+ sv = &GetObjectAclOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlList", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectLegalHold struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata)
+ }
+ output := &GetObjectLegalHoldOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectLegalHoldOutput
+ if *v == nil {
+ sv = &GetObjectLegalHoldOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("LegalHold", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata)
+ }
+ output := &GetObjectLockConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectLockConfigurationOutput
+ if *v == nil {
+ sv = &GetObjectLockConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectLockConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectRetention struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectRetention) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata)
+ }
+ output := &GetObjectRetentionOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectRetentionOutput
+ if *v == nil {
+ sv = &GetObjectRetentionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Retention", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata)
+ }
+ output := &GetObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectTaggingOutput
+ if *v == nil {
+ sv = &GetObjectTaggingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TagSet", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectTorrent struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata)
+ }
+ output := &GetObjectTorrentOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization of nil %T", v)
+ }
+ v.Body = body
+ return nil
+}
+
+type awsRestxml_deserializeOpGetPublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the GetPublicAccessBlock HTTP response. Non-2xx
+// responses are delegated to the operation's error deserializer; otherwise
+// the XML body is decoded into the output's PublicAccessBlockConfiguration.
+func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata)
+	}
+	output := &GetPublicAccessBlockOutput{}
+	out.Result = output
+
+	// Tee the body through a ring buffer so decode errors can carry a
+	// snapshot of the most recently consumed bytes.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body yields a zero-value output, not an error.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorGetPublicAccessBlock maps a non-2xx response
+// to an API error. The body is buffered up front so it can be re-read after
+// the generic error components (code, message, request/host IDs) are parsed.
+func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so a code-specific error deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation; return a generic error.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput decodes the XML
+// document into *v, allocating the output when *v is nil. Unknown elements
+// are skipped.
+func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetPublicAccessBlockOutput
+	if *v == nil {
+		sv = &GetPublicAccessBlockOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		// Restore the parent decoder before reading the next sibling token.
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpHeadBucket is the deserialize middleware for the
+// HeadBucket operation.
+type awsRestxml_deserializeOpHeadBucket struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpHeadBucket) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize handles the HeadBucket response. The operation returns
+// no body, so a successful response only drains the body (which allows the
+// HTTP connection to be reused).
+func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata)
+	}
+	output := &HeadBucketOutput{}
+	out.Result = output
+
+	// Drain the (empty) body so the transport can reuse the connection.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorHeadBucket maps a non-2xx HeadBucket response
+// to an API error. The body is buffered up front so it can be re-read by the
+// code-specific deserializer after the generic components are parsed.
+func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the code-specific deserializer below can re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("NotFound", errorCode):
+		return awsRestxml_deserializeErrorNotFound(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHeadObject is the deserialize middleware for the
+// HeadObject operation.
+type awsRestxml_deserializeOpHeadObject struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpHeadObject) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize handles the HeadObject response. All output fields for
+// this operation come from HTTP headers, so only the header-binding
+// deserializer runs; there is no body document to decode.
+func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, &metadata)
+	}
+	output := &HeadObjectOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorHeadObject maps a non-2xx HeadObject response
+// to an API error. The body is buffered up front so it can be re-read after
+// the generic error components (code, message, request/host IDs) are parsed.
+func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so a code-specific error deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation; return a generic error.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsHeadObjectOutput copies the HeadObject
+// response headers into the output structure. Each header value is trimmed
+// in place; boolean, integer, and timestamp headers are parsed, and any
+// parse failure aborts deserialization with the parse error.
+func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.AcceptRanges = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ArchiveStatus = types.ArchiveStatus(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.BucketKeyEnabled = vv
+	}
+
+	if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.CacheControl = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ContentDisposition = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ContentEncoding = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ContentLanguage = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		// Base 0 lets ParseInt accept 0x/0-prefixed forms as generated.
+		vv, err := strconv.ParseInt(headerValues[0], 0, 64)
+		if err != nil {
+			return err
+		}
+		v.ContentLength = vv
+	}
+
+	if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ContentType = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.DeleteMarker = vv
+	}
+
+	if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ETag = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.Expiration = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		t, err := smithytime.ParseHTTPDate(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.Expires = ptr.Time(t)
+	}
+
+	if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		t, err := smithytime.ParseHTTPDate(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.LastModified = ptr.Time(t)
+	}
+
+	// Collect all x-amz-meta-* headers into the Metadata map, keyed by the
+	// lowercased suffix after the prefix.
+	for headerKey, headerValues := range response.Header {
+		if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") {
+			if v.Metadata == nil {
+				v.Metadata = map[string]string{}
+			}
+			headerValues[0] = strings.TrimSpace(headerValues[0])
+			v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0]
+		}
+	}
+
+	if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+		if err != nil {
+			return err
+		}
+		v.MissingMeta = int32(vv)
+	}
+
+	if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ObjectLockMode = types.ObjectLockMode(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		t, err := smithytime.ParseDateTime(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.ObjectLockRetainUntilDate = ptr.Time(t)
+	}
+
+	if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+		if err != nil {
+			return err
+		}
+		v.PartsCount = int32(vv)
+	}
+
+	if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ReplicationStatus = types.ReplicationStatus(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.Restore = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSEKMSKeyId = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.StorageClass = types.StorageClass(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.VersionId = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.WebsiteRedirectLocation = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpListBucketAnalyticsConfigurations is the
+// deserialize middleware for the ListBucketAnalyticsConfigurations operation.
+type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the ListBucketAnalyticsConfigurations HTTP
+// response. Non-2xx responses go to the operation's error deserializer;
+// otherwise the XML body is decoded into the output document.
+func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata)
+	}
+	output := &ListBucketAnalyticsConfigurationsOutput{}
+	out.Result = output
+
+	// Tee the body through a ring buffer so decode errors can carry a
+	// snapshot of the most recently consumed bytes.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body yields a zero-value output, not an error.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations maps a
+// non-2xx response to an API error. The body is buffered so it can be
+// re-read after the generic error components are parsed.
+func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so a code-specific error deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation; return a generic error.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput
+// decodes the XML document into *v, allocating the output when *v is nil.
+// Unknown elements are skipped.
+func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketAnalyticsConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketAnalyticsConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AnalyticsConfiguration", t.Name.Local):
+			// Flattened list: each element appends to the list directly.
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = xtv
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		// Restore the parent decoder before reading the next sibling token.
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations is the
+// deserialize middleware for the ListBucketIntelligentTieringConfigurations
+// operation.
+type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the ListBucketIntelligentTieringConfigurations
+// HTTP response. Non-2xx responses go to the operation's error deserializer;
+// otherwise the XML body is decoded into the output document.
+func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata)
+	}
+	output := &ListBucketIntelligentTieringConfigurationsOutput{}
+	out.Result = output
+
+	// Tee the body through a ring buffer so decode errors can carry a
+	// snapshot of the most recently consumed bytes.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body yields a zero-value output, not an error.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations
+// maps a non-2xx response to an API error. The body is buffered so it can be
+// re-read after the generic error components are parsed.
+func awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so a code-specific error deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation; return a generic error.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput
+// decodes the XML document into *v, allocating the output when *v is nil.
+// Unknown elements are skipped.
+func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketIntelligentTieringConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketIntelligentTieringConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+			// Flattened list: each element appends to the list directly.
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = xtv
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		// Restore the parent decoder before reading the next sibling token.
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpListBucketInventoryConfigurations is the
+// deserialize middleware for the ListBucketInventoryConfigurations operation.
+type awsRestxml_deserializeOpListBucketInventoryConfigurations struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the ListBucketInventoryConfigurations HTTP
+// response. Non-2xx responses go to the operation's error deserializer;
+// otherwise the XML body is decoded into the output document.
+func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata)
+	}
+	output := &ListBucketInventoryConfigurationsOutput{}
+	out.Result = output
+
+	// Tee the body through a ring buffer so decode errors can carry a
+	// snapshot of the most recently consumed bytes.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body yields a zero-value output, not an error.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorListBucketInventoryConfigurations maps a
+// non-2xx response to an API error. The body is buffered so it can be
+// re-read after the generic error components are parsed.
+func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so a code-specific error deserializer could re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation; return a generic error.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput
+// decodes the XML document into *v, allocating the output when *v is nil.
+// Unknown elements are skipped.
+func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketInventoryConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketInventoryConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+			// Flattened list: each element appends to the list directly.
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = xtv
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		// Restore the parent decoder before reading the next sibling token.
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeOpListBucketMetricsConfigurations is the deserialize
+// middleware for the ListBucketMetricsConfigurations operation.
+type awsRestxml_deserializeOpListBucketMetricsConfigurations struct {
+}
+
+// ID returns the middleware identifier; all operation deserializers share
+// the "OperationDeserializer" ID.
+func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the ListBucketMetricsConfigurations HTTP
+// response. Non-2xx responses go to the operation's error deserializer;
+// otherwise the XML body is decoded into the output document.
+func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata)
+	}
+	output := &ListBucketMetricsConfigurationsOutput{}
+	out.Result = output
+
+	// Tee the body through a ring buffer so decode errors can carry a
+	// snapshot of the most recently consumed bytes.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body yields a zero-value output, not an error.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketMetricsConfigurationsOutput
+ if *v == nil {
+ sv = &ListBucketMetricsConfigurationsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("MetricsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListBuckets struct {
+}
+
+func (*awsRestxml_deserializeOpListBuckets) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata)
+ }
+ output := &ListBucketsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketsOutput
+ if *v == nil {
+ sv = &ListBucketsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Buckets", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListMultipartUploads struct {
+}
+
+func (*awsRestxml_deserializeOpListMultipartUploads) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata)
+ }
+ output := &ListMultipartUploadsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListMultipartUploadsOutput
+ if *v == nil {
+ sv = &ListMultipartUploadsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("KeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxUploads", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxUploads = int32(i64)
+ }
+
+ case strings.EqualFold("NextKeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextKeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextUploadIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextUploadIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UploadIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Upload", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjects struct {
+}
+
+func (*awsRestxml_deserializeOpListObjects) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, &metadata)
+ }
+ output := &ListObjectsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchBucket", errorCode):
+ return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectsOutput
+ if *v == nil {
+ sv = &ListObjectsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Contents", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("Marker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Marker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = int32(i64)
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjectsV2 struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectsV2) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata)
+ }
+ output := &ListObjectsV2Output{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchBucket", errorCode):
+ return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectsV2Output
+ if *v == nil {
+ sv = &ListObjectsV2Output{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Contents", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("KeyCount", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.KeyCount = int32(i64)
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = int32(i64)
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("StartAfter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StartAfter = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjectVersions struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectVersions) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata)
+ }
+ output := &ListObjectVersionsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectVersionsOutput
+ if *v == nil {
+ sv = &ListObjectVersionsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("DeleteMarker", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("KeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = int32(i64)
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextKeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextKeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextVersionIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextVersionIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Version", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+// awsRestxml_deserializeOpListParts is the deserialize-step middleware for
+// the S3 ListParts operation. (smithy-go generated code — comments are
+// review annotations only.)
+type awsRestxml_deserializeOpListParts struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpListParts) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses are routed to the operation's error
+// deserializer; otherwise HTTP-header bindings and the XML body are decoded
+// into a ListPartsOutput. A 1 KiB ring buffer tees the body so a snapshot of
+// recently read bytes can be attached to any decode error.
+func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata)
+ }
+ output := &ListPartsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ // An empty body is acceptable: header bindings are already decoded.
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorListParts converts a non-2xx ListParts
+// response into an error. The body is buffered into memory so it can be
+// parsed and then rewound; S3 host/request IDs are copied into the
+// operation metadata. The switch has only a default case (no modeled error
+// shapes), so every error surfaces as a generic smithy API error.
+func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ // Rewind for any downstream reader; Seek on a bytes.Reader to start
+ // cannot fail, so the error is deliberately ignored.
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpHttpBindingsListPartsOutput copies the
+// header-bound members of the ListParts response (abort date/rule ID and
+// request-charged indicator) from HTTP headers into v. Only the first
+// value of each header is used, trimmed of surrounding whitespace.
+func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.AbortDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AbortRuleId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+// awsRestxml_deserializeOpDocumentListPartsOutput decodes the ListParts XML
+// response document into *v, allocating the output struct when *v is nil.
+// Each loop iteration consumes one child element of the current node;
+// elements not modeled below are skipped. (smithy-go generated code —
+// comments are review annotations only.)
+func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListPartsOutput
+ if *v == nil {
+ sv = &ListPartsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ // Scope a child decoder to the element just read; the parent decoder
+ // is restored at the bottom of the loop.
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Initiator", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ // NOTE(review): %T formats the raw token bytes, not the
+ // failed bool target — a quirk of the generated message.
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = xtv
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxParts", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxParts = int32(i64)
+ }
+
+ case strings.EqualFold("NextPartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextPartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.PartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Part", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ // Restore the parent-node decoder before reading the next sibling.
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+// awsRestxml_deserializeOpPutBucketAccelerateConfiguration is the
+// deserialize-step middleware for PutBucketAccelerateConfiguration.
+type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata)
+ }
+ output := &PutBucketAccelerateConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration converts a
+// non-2xx response into an error: the body is buffered and parsed for S3
+// error components, host/request IDs are recorded in metadata, and (no
+// modeled error shapes — the switch has only a default case) a generic
+// smithy API error is returned.
+func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketAcl is the deserialize-step middleware
+// for PutBucketAcl.
+type awsRestxml_deserializeOpPutBucketAcl struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata)
+ }
+ output := &PutBucketAclOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketAcl converts a non-2xx response into
+// an error: the body is buffered and parsed for S3 error components,
+// host/request IDs are recorded in metadata, and (no modeled error shapes —
+// the switch has only a default case) a generic smithy API error is
+// returned.
+func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketAnalyticsConfiguration is the
+// deserialize-step middleware for PutBucketAnalyticsConfiguration.
+type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &PutBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration converts a
+// non-2xx response into an error: the body is buffered and parsed for S3
+// error components, host/request IDs are recorded in metadata, and (no
+// modeled error shapes — the switch has only a default case) a generic
+// smithy API error is returned.
+func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketCors is the deserialize-step middleware
+// for PutBucketCors.
+type awsRestxml_deserializeOpPutBucketCors struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketCors) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata)
+ }
+ output := &PutBucketCorsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketCors converts a non-2xx response
+// into an error: the body is buffered and parsed for S3 error components,
+// host/request IDs are recorded in metadata, and (no modeled error shapes —
+// the switch has only a default case) a generic smithy API error is
+// returned.
+func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketEncryption is the deserialize-step
+// middleware for PutBucketEncryption.
+type awsRestxml_deserializeOpPutBucketEncryption struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata)
+ }
+ output := &PutBucketEncryptionOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketEncryption converts a non-2xx
+// response into an error: the body is buffered and parsed for S3 error
+// components, host/request IDs are recorded in metadata, and (no modeled
+// error shapes — the switch has only a default case) a generic smithy API
+// error is returned.
+func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration is the
+// deserialize-step middleware for PutBucketIntelligentTieringConfiguration.
+type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata)
+ }
+ output := &PutBucketIntelligentTieringConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration
+// converts a non-2xx response into an error: the body is buffered and
+// parsed for S3 error components, host/request IDs are recorded in
+// metadata, and (no modeled error shapes — the switch has only a default
+// case) a generic smithy API error is returned.
+func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketInventoryConfiguration is the
+// deserialize-step middleware for PutBucketInventoryConfiguration.
+type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata)
+ }
+ output := &PutBucketInventoryConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration converts a
+// non-2xx response into an error: the body is buffered and parsed for S3
+// error components, host/request IDs are recorded in metadata, and (no
+// modeled error shapes — the switch has only a default case) a generic
+// smithy API error is returned.
+func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketLifecycleConfiguration is the
+// deserialize-step middleware for PutBucketLifecycleConfiguration.
+type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata)
+ }
+ output := &PutBucketLifecycleConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration converts a
+// non-2xx response into an error: the body is buffered and parsed for S3
+// error components, host/request IDs are recorded in metadata, and (no
+// modeled error shapes — the switch has only a default case) a generic
+// smithy API error is returned.
+func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketLogging is the deserialize-step
+// middleware for PutBucketLogging.
+type awsRestxml_deserializeOpPutBucketLogging struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketLogging) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata)
+ }
+ output := &PutBucketLoggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutBucketLogging converts a non-2xx response
+// into an error: the body is buffered and parsed for S3 error components,
+// host/request IDs are recorded in metadata, and (no modeled error shapes —
+// the switch has only a default case) a generic smithy API error is
+// returned.
+func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+// awsRestxml_deserializeOpPutBucketMetricsConfiguration is the
+// deserialize-step middleware for PutBucketMetricsConfiguration.
+type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+// HandleDeserialize runs the remainder of the stack, then interprets the raw
+// HTTP response: non-2xx statuses go to the operation's error deserializer;
+// success yields an empty output struct — nothing is decoded from the body,
+// which is drained so net/http can reuse the connection.
+func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata)
+ }
+ output := &PutBucketMetricsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
// awsRestxml_deserializeOpPutBucketNotificationConfiguration is the smithy-go
// generated deserialize middleware for the S3 PutBucketNotificationConfiguration
// operation (REST-XML). NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata)
	}
	output := &PutBucketNotificationConfigurationOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration maps an
// error response to a smithy API error, recording host/request IDs in
// metadata. No typed errors are modeled, so a GenericAPIError is returned.
func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketOwnershipControls is the smithy-go
// generated deserialize middleware for the S3 PutBucketOwnershipControls
// operation (REST-XML). NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketOwnershipControls struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata)
	}
	output := &PutBucketOwnershipControlsOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketOwnershipControls maps an error
// response to a smithy API error, recording host/request IDs in metadata.
// No typed errors are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketPolicy is the smithy-go generated
// deserialize middleware for the S3 PutBucketPolicy operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketPolicy struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata)
	}
	output := &PutBucketPolicyOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketPolicy maps an error response to a
// smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketReplication is the smithy-go generated
// deserialize middleware for the S3 PutBucketReplication operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketReplication struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketReplication) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata)
	}
	output := &PutBucketReplicationOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketReplication maps an error response to
// a smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketRequestPayment is the smithy-go generated
// deserialize middleware for the S3 PutBucketRequestPayment operation
// (REST-XML). NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketRequestPayment struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata)
	}
	output := &PutBucketRequestPaymentOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketRequestPayment maps an error response
// to a smithy API error, recording host/request IDs in metadata. No typed
// errors are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketTagging is the smithy-go generated
// deserialize middleware for the S3 PutBucketTagging operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketTagging struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketTagging) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata)
	}
	output := &PutBucketTaggingOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketTagging maps an error response to a
// smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketVersioning is the smithy-go generated
// deserialize middleware for the S3 PutBucketVersioning operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketVersioning struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata)
	}
	output := &PutBucketVersioningOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketVersioning maps an error response to
// a smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutBucketWebsite is the smithy-go generated
// deserialize middleware for the S3 PutBucketWebsite operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutBucketWebsite struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; successful responses carry no modeled payload, so the body is
// drained to allow HTTP connection reuse.
func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata)
	}
	output := &PutBucketWebsiteOutput{}
	out.Result = output

	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("failed to discard response body, %w", err),
		}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutBucketWebsite maps an error response to a
// smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}
+
// awsRestxml_deserializeOpPutObject is the smithy-go generated deserialize
// middleware for the S3 PutObject operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutObject struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutObject) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; on success the modeled response headers are bound onto
// PutObjectOutput (this operation has no modeled XML payload).
func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata)
	}
	output := &PutObjectOutput{}
	out.Result = output

	err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response)
	if err != nil {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutObject maps an error response to a smithy
// API error, recording host/request IDs in metadata. No typed errors are
// modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestxml_deserializeOpHttpBindingsPutObjectOutput copies the modeled
// response headers (encryption settings, ETag, expiration, version ID, etc.)
// onto the PutObjectOutput struct. Only the first value of each header is
// used, after trimming surrounding whitespace.
func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error {
	if v == nil {
		return fmt.Errorf("unsupported deserialization for nil %T", v)
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		vv, err := strconv.ParseBool(headerValues[0])
		if err != nil {
			return err
		}
		v.BucketKeyEnabled = vv
	}

	if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.ETag = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.Expiration = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.RequestCharged = types.RequestCharged(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.SSEKMSKeyId = ptr.String(headerValues[0])
	}

	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.VersionId = ptr.String(headerValues[0])
	}

	return nil
}
+
// awsRestxml_deserializeOpPutObjectAcl is the smithy-go generated deserialize
// middleware for the S3 PutObjectAcl operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutObjectAcl struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutObjectAcl) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; on success the modeled response headers are bound onto
// PutObjectAclOutput.
func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata)
	}
	output := &PutObjectAclOutput{}
	out.Result = output

	err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response)
	if err != nil {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutObjectAcl maps an error response to a
// smithy API error, recording host/request IDs in metadata. The modeled
// NoSuchKey error is decoded into its typed form; anything else becomes a
// GenericAPIError.
func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind so the typed NoSuchKey deserializer can re-read the body.
	errorBody.Seek(0, io.SeekStart)
	switch {
	case strings.EqualFold("NoSuchKey", errorCode):
		return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput copies the modeled
// x-amz-request-charged response header onto the PutObjectAclOutput struct.
func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error {
	if v == nil {
		return fmt.Errorf("unsupported deserialization for nil %T", v)
	}

	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.RequestCharged = types.RequestCharged(headerValues[0])
	}

	return nil
}
+
// awsRestxml_deserializeOpPutObjectLegalHold is the smithy-go generated
// deserialize middleware for the S3 PutObjectLegalHold operation (REST-XML).
// NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutObjectLegalHold struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; on success the modeled response headers are bound onto
// PutObjectLegalHoldOutput.
func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata)
	}
	output := &PutObjectLegalHoldOutput{}
	out.Result = output

	err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response)
	if err != nil {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
	}

	return out, metadata, err
}

// awsRestxml_deserializeOpErrorPutObjectLegalHold maps an error response to a
// smithy API error, recording host/request IDs in metadata. No typed errors
// are modeled, so a GenericAPIError is always returned.
func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
		UseStatusCode: true, StatusCode: response.StatusCode,
	})
	if err != nil {
		return err
	}
	if hostID := errorComponents.HostID; len(hostID) != 0 {
		s3shared.SetHostIDMetadata(metadata, hostID)
	}
	if reqID := errorComponents.RequestID; len(reqID) != 0 {
		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
	}
	if len(errorComponents.Code) != 0 {
		errorCode = errorComponents.Code
	}
	if len(errorComponents.Message) != 0 {
		errorMessage = errorComponents.Message
	}
	// Rewind for potential re-reading by typed deserializers (none here).
	errorBody.Seek(0, io.SeekStart)
	switch {
	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput copies the
// modeled x-amz-request-charged response header onto the output struct.
func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error {
	if v == nil {
		return fmt.Errorf("unsupported deserialization for nil %T", v)
	}

	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
		headerValues[0] = strings.TrimSpace(headerValues[0])
		v.RequestCharged = types.RequestCharged(headerValues[0])
	}

	return nil
}
+
// awsRestxml_deserializeOpPutObjectLockConfiguration is the smithy-go
// generated deserialize middleware for the S3 PutObjectLockConfiguration
// operation (REST-XML). NOTE(review): machine-generated; do not hand-edit.
type awsRestxml_deserializeOpPutObjectLockConfiguration struct {
}

// ID identifies this middleware step within the operation's stack.
func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize routes non-2xx responses to the operation's error
// deserializer; on success the modeled response headers are bound onto
// PutObjectLockConfigurationOutput.
func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata)
	}
	output := &PutObjectLockConfigurationOutput{}
	out.Result = output

	err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response)
	if err != nil {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
	}

	return out, metadata, err
}
+
+func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+// awsRestxml_deserializeOpPutObjectRetention is the response deserializer
+// middleware for the PutObjectRetention operation.
+type awsRestxml_deserializeOpPutObjectRetention struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpPutObjectRetention) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the raw HTTP response into a
+// PutObjectRetentionOutput, routing non-2xx responses to the error path.
+func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata)
+	}
+	output := &PutObjectRetentionOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutObjectRetention maps a non-2xx response to a
+// Go error (buffer body, extract code/message, record host/request IDs).
+func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput copies
+// header-bound fields from the HTTP response into the output struct.
+func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpPutObjectTagging is the response deserializer
+// middleware for the PutObjectTagging operation.
+type awsRestxml_deserializeOpPutObjectTagging struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpPutObjectTagging) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the raw HTTP response into a
+// PutObjectTaggingOutput, routing non-2xx responses to the error path.
+func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata)
+	}
+	output := &PutObjectTaggingOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutObjectTagging maps a non-2xx response to a
+// Go error (buffer body, extract code/message, record host/request IDs).
+func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput copies
+// header-bound fields from the HTTP response into the output struct.
+func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.VersionId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpPutPublicAccessBlock is the response deserializer
+// middleware for the PutPublicAccessBlock operation.
+type awsRestxml_deserializeOpPutPublicAccessBlock struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize builds an empty PutPublicAccessBlockOutput for 2xx
+// responses (the operation has no modeled output bindings) and drains the
+// response body; non-2xx responses are routed to the error path.
+func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata)
+	}
+	output := &PutPublicAccessBlockOutput{}
+	out.Result = output
+
+	// Fully drain the body so the underlying HTTP connection can be reused.
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorPutPublicAccessBlock maps a non-2xx response to
+// a Go error (buffer body, extract code/message, record host/request IDs).
+func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpRestoreObject is the response deserializer
+// middleware for the RestoreObject operation.
+type awsRestxml_deserializeOpRestoreObject struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpRestoreObject) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the raw HTTP response into a RestoreObjectOutput,
+// routing non-2xx responses to the error path.
+func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata)
+	}
+	output := &RestoreObjectOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorRestoreObject maps a non-2xx response to a Go
+// error. RestoreObject has one modeled error (ObjectAlreadyInActiveTierError);
+// anything else becomes a generic API error.
+func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the modeled-error deserializer can re-read the body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode):
+		return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput copies header-bound
+// fields from the HTTP response into the output struct.
+func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RestoreOutputPath = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpUploadPart is the response deserializer middleware
+// for the UploadPart operation.
+type awsRestxml_deserializeOpUploadPart struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpUploadPart) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the raw HTTP response into an UploadPartOutput,
+// routing non-2xx responses to the error path.
+func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata)
+	}
+	output := &UploadPartOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorUploadPart maps a non-2xx response to a Go
+// error (buffer body, extract code/message, record host/request IDs).
+func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsUploadPartOutput copies header-bound
+// fields (ETag, request-charged, SSE details) from the HTTP response into the
+// output struct. Only the first value of each header is used, whitespace
+// trimmed.
+func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.BucketKeyEnabled = vv
+	}
+
+	if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ETag = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSEKMSKeyId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+// awsRestxml_deserializeOpUploadPartCopy is the response deserializer
+// middleware for the UploadPartCopy operation.
+type awsRestxml_deserializeOpUploadPartCopy struct {
+}
+
+// ID identifies this middleware within the smithy middleware stack.
+func (*awsRestxml_deserializeOpUploadPartCopy) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the raw HTTP response into an
+// UploadPartCopyOutput: header-bound fields first, then the XML document body
+// (CopyPartResult). Non-2xx responses are routed to the error path.
+func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata)
+	}
+	output := &UploadPartCopyOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	// Tee the body through a ring buffer so a snapshot of the most recent bytes
+	// can be attached to any deserialization error for diagnostics.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		// An empty body is valid: return the output with headers only.
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+// awsRestxml_deserializeOpErrorUploadPartCopy maps a non-2xx response to a Go
+// error (buffer body, extract code/message, record host/request IDs).
+func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		// No modeled error shapes for this operation.
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput copies header-bound
+// fields (copy-source version, request-charged, SSE details) from the HTTP
+// response into the output struct.
+func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.BucketKeyEnabled = vv
+	}
+
+	if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.CopySourceVersionId = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSEKMSKeyId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+// awsRestxml_deserializeOpDocumentUploadPartCopyOutput decodes the XML
+// document payload of an UploadPartCopy response into **UploadPartCopyOutput,
+// allocating the output if *v is nil. Unrecognized elements are skipped.
+func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *UploadPartCopyOutput
+	if *v == nil {
+		sv = &UploadPartCopyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		// Scope a child decoder to the current element; restored after the case.
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CopyPartResult", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeErrorBucketAlreadyExists returns the modeled
+// BucketAlreadyExists error. The response/errorBody parameters are unused but
+// keep the uniform error-deserializer signature.
+func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.BucketAlreadyExists{}
+	return output
+}
+
+// awsRestxml_deserializeErrorBucketAlreadyOwnedByYou returns the modeled
+// BucketAlreadyOwnedByYou error.
+func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.BucketAlreadyOwnedByYou{}
+	return output
+}
+
+// awsRestxml_deserializeErrorInvalidObjectState builds the modeled
+// InvalidObjectState error, decoding any XML payload from errorBody into it.
+// An empty body (io.EOF on the root element) yields the zero-valued error.
+func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidObjectState{}
+	// Ring buffer keeps a snapshot of recent bytes for diagnostics on failure.
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsRestxml_deserializeErrorNoSuchBucket returns the modeled NoSuchBucket
+// error.
+func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchBucket{}
+	return output
+}
+
+// awsRestxml_deserializeErrorNoSuchKey returns the modeled NoSuchKey error.
+func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchKey{}
+	return output
+}
+
+// awsRestxml_deserializeErrorNoSuchUpload returns the modeled NoSuchUpload
+// error.
+func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchUpload{}
+	return output
+}
+
+// awsRestxml_deserializeErrorNotFound returns the modeled NotFound error.
+func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NotFound{}
+	return output
+}
+
+// awsRestxml_deserializeErrorObjectAlreadyInActiveTierError returns the
+// modeled ObjectAlreadyInActiveTierError error.
+func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ObjectAlreadyInActiveTierError{}
+	return output
+}
+
+// awsRestxml_deserializeErrorObjectNotInActiveTierError returns the modeled
+// ObjectNotInActiveTierError error.
+func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ObjectNotInActiveTierError{}
+	return output
+}
+
+// awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload decodes an XML
+// node into **types.AbortIncompleteMultipartUpload, allocating the struct if
+// *v is nil. Unrecognized elements are skipped.
+func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AbortIncompleteMultipartUpload
+	if *v == nil {
+		sv = &types.AbortIncompleteMultipartUpload{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DaysAfterInitiation", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			// A nil value means an empty element; leave the zero value in place.
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.DaysAfterInitiation = int32(i64)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeDocumentAccessControlTranslation decodes an XML node
+// into **types.AccessControlTranslation, allocating the struct if *v is nil.
+// Unrecognized elements are skipped.
+func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AccessControlTranslation
+	if *v == nil {
+		sv = &types.AccessControlTranslation{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Owner", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			// Empty element: keep the zero value.
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Owner = types.OwnerOverride(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeDocumentAllowedHeaders decodes a wrapped XML list
+// (child "member" elements) into a []string, appending to any existing slice.
+func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			// Empty member element: skip the append for this entry.
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeDocumentAllowedHeadersUnwrapped decodes one element of
+// a flattened (unwrapped) XML list, appending a single value per call.
+func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+// awsRestxml_deserializeDocumentAllowedMethods decodes a wrapped XML list
+// (child "member" elements) into a []string, appending to any existing slice.
+func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			// Empty member element: skip the append for this entry.
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeDocumentAllowedMethodsUnwrapped decodes one element of
+// a flattened (unwrapped) XML list, appending a single value per call.
+func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+// awsRestxml_deserializeDocumentAllowedOrigins decodes a wrapped XML list
+// (child "member" elements) into a []string, appending to any existing slice.
+func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			// Empty member element: skip the append for this entry.
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsRestxml_deserializeDocumentAllowedOriginsUnwrapped decodes one element of
+// a flattened (unwrapped) XML list, appending a single value per call.
+func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsAndOperator
+ if *v == nil {
+ sv = &types.AnalyticsAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsConfiguration
+ if *v == nil {
+ sv = &types.AnalyticsConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("StorageClassAnalysis", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.AnalyticsConfiguration
+ if *v == nil {
+ sv = make([]types.AnalyticsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.AnalyticsConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.AnalyticsConfiguration
+ if *v == nil {
+ sv = make([]types.AnalyticsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.AnalyticsConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsExportDestination
+ if *v == nil {
+ sv = &types.AnalyticsExportDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3BucketDestination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.AnalyticsFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.AnalyticsAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.AnalyticsFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.AnalyticsFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.AnalyticsFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsS3BucketDestination
+ if *v == nil {
+ sv = &types.AnalyticsS3BucketDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("BucketAccountId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.BucketAccountId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Format", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Format = types.AnalyticsS3ExportFileFormat(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Bucket
+ if *v == nil {
+ sv = &types.Bucket{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CreationDate", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.CreationDate = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.BucketAlreadyExists
+ if *v == nil {
+ sv = &types.BucketAlreadyExists{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.BucketAlreadyOwnedByYou
+ if *v == nil {
+ sv = &types.BucketAlreadyOwnedByYou{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Bucket
+ if *v == nil {
+ sv = make([]types.Bucket, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ var col types.Bucket
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Bucket
+ if *v == nil {
+ sv = make([]types.Bucket, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Bucket
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CommonPrefix
+ if *v == nil {
+ sv = &types.CommonPrefix{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.CommonPrefix
+ if *v == nil {
+ sv = make([]types.CommonPrefix, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.CommonPrefix
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ var sv []types.CommonPrefix
+ if *v == nil {
+ sv = make([]types.CommonPrefix, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.CommonPrefix
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Condition
+ if *v == nil {
+ sv = &types.Condition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HttpErrorCodeReturnedEquals = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("KeyPrefixEquals", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyPrefixEquals = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CopyObjectResult
+ if *v == nil {
+ sv = &types.CopyObjectResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CopyPartResult
+ if *v == nil {
+ sv = &types.CopyPartResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CORSRule
+ if *v == nil {
+ sv = &types.CORSRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AllowedHeader", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("AllowedMethod", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("AllowedOrigin", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ExposeHeader", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("MaxAgeSeconds", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxAgeSeconds = int32(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.CORSRule
+ if *v == nil {
+ sv = make([]types.CORSRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.CORSRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.CORSRule
+ if *v == nil {
+ sv = make([]types.CORSRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.CORSRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DefaultRetention
+ if *v == nil {
+ sv = &types.DefaultRetention{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = int32(i64)
+ }
+
+ case strings.EqualFold("Mode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Mode = types.ObjectLockRetentionMode(xtv)
+ }
+
+ case strings.EqualFold("Years", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Years = int32(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeletedObject
+ if *v == nil {
+ sv = &types.DeletedObject{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DeleteMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val)
+ }
+ sv.DeleteMarker = xtv
+ }
+
+ case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DeleteMarkerVersionId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.DeletedObject
+ if *v == nil {
+ sv = make([]types.DeletedObject, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.DeletedObject
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ var sv []types.DeletedObject
+ if *v == nil {
+ sv = make([]types.DeletedObject, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.DeletedObject
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeleteMarkerEntry
+ if *v == nil {
+ sv = &types.DeleteMarkerEntry{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsLatest", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val)
+ }
+ sv.IsLatest = xtv
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeleteMarkerReplication
+ if *v == nil {
+ sv = &types.DeleteMarkerReplication{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.DeleteMarkerReplicationStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.DeleteMarkerEntry
+ if *v == nil {
+ sv = make([]types.DeleteMarkerEntry, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.DeleteMarkerEntry
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ var sv []types.DeleteMarkerEntry
+ if *v == nil {
+ sv = make([]types.DeleteMarkerEntry, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.DeleteMarkerEntry
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Destination
+ if *v == nil {
+ sv = &types.Destination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlTranslation", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncryptionConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Metrics", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ReplicationTime", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.EncryptionConfiguration
+ if *v == nil {
+ sv = &types.EncryptionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplicaKmsKeyID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Error
+ if *v == nil {
+ sv = &types.Error{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Code", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Code = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ErrorDocument
+ if *v == nil {
+ sv = &types.ErrorDocument{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Error
+ if *v == nil {
+ sv = make([]types.Error, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Error
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Error
+ if *v == nil {
+ sv = make([]types.Error, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Error
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Event
+ if *v == nil {
+ sv = make([]types.Event, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Event
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = types.Event(xtv)
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Event
+ if *v == nil {
+ sv = make([]types.Event, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Event
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = types.Event(xtv)
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ExistingObjectReplication
+ if *v == nil {
+ sv = &types.ExistingObjectReplication{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ExistingObjectReplicationStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = xtv
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv string
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.FilterRule
+ if *v == nil {
+ sv = &types.FilterRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = types.FilterRuleName(xtv)
+ }
+
+ case strings.EqualFold("Value", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Value = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.FilterRule
+ if *v == nil {
+ sv = make([]types.FilterRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.FilterRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.FilterRule
+ if *v == nil {
+ sv = make([]types.FilterRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.FilterRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Grant
+ if *v == nil {
+ sv = &types.Grant{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Grantee", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Permission", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Permission = types.Permission(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Grantee
+ if *v == nil {
+ sv = &types.Grantee{}
+ } else {
+ sv = *v
+ }
+
+ for _, attr := range decoder.StartEl.Attr {
+ name := attr.Name.Local
+ if len(attr.Name.Space) != 0 {
+ name = attr.Name.Space + `:` + attr.Name.Local
+ }
+ switch {
+ case strings.EqualFold("xsi:type", name):
+ val := []byte(attr.Value)
+ {
+ xtv := string(val)
+ sv.Type = types.Type(xtv)
+ }
+
+ }
+ }
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EmailAddress", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EmailAddress = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("URI", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.URI = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Grant
+ if *v == nil {
+ sv = make([]types.Grant, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Grant", t.Name.Local):
+ var col types.Grant
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Grant
+ if *v == nil {
+ sv = make([]types.Grant, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Grant
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IndexDocument
+ if *v == nil {
+ sv = &types.IndexDocument{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Suffix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Suffix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Initiator
+ if *v == nil {
+ sv = &types.Initiator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringAndOperator
+ if *v == nil {
+ sv = &types.IntelligentTieringAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = &types.IntelligentTieringConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.IntelligentTieringStatus(xtv)
+ }
+
+ case strings.EqualFold("Tiering", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = make([]types.IntelligentTieringConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.IntelligentTieringConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = make([]types.IntelligentTieringConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.IntelligentTieringConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringFilter
+ if *v == nil {
+ sv = &types.IntelligentTieringFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidObjectState
+ if *v == nil {
+ sv = &types.InvalidObjectState{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessTier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessTier = types.IntelligentTieringAccessTier(xtv)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryConfiguration
+ if *v == nil {
+ sv = &types.InventoryConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IncludedObjectVersions", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv)
+ }
+
+ case strings.EqualFold("IsEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val)
+ }
+ sv.IsEnabled = xtv
+ }
+
+ case strings.EqualFold("OptionalFields", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Schedule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.InventoryConfiguration
+ if *v == nil {
+ sv = make([]types.InventoryConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.InventoryConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.InventoryConfiguration
+ if *v == nil {
+ sv = make([]types.InventoryConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.InventoryConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryDestination
+ if *v == nil {
+ sv = &types.InventoryDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3BucketDestination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryEncryption
+ if *v == nil {
+ sv = &types.InventoryEncryption{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("SSE-KMS", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SSE-S3", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryFilter
+ if *v == nil {
+ sv = &types.InventoryFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.InventoryOptionalField
+ if *v == nil {
+ sv = make([]types.InventoryOptionalField, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("Field", t.Name.Local):
+ var col types.InventoryOptionalField
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = types.InventoryOptionalField(xtv)
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error {
+ var sv []types.InventoryOptionalField
+ if *v == nil {
+ sv = make([]types.InventoryOptionalField, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.InventoryOptionalField
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = types.InventoryOptionalField(xtv)
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryS3BucketDestination
+ if *v == nil {
+ sv = &types.InventoryS3BucketDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccountId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccountId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Encryption", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Format", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Format = types.InventoryFormat(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventorySchedule
+ if *v == nil {
+ sv = &types.InventorySchedule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Frequency", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Frequency = types.InventoryFrequency(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = &types.LambdaFunctionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("CloudFunction", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.LambdaFunctionArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = make([]types.LambdaFunctionConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.LambdaFunctionConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = make([]types.LambdaFunctionConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.LambdaFunctionConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleExpiration
+ if *v == nil {
+ sv = &types.LifecycleExpiration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Date", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Date = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = int32(i64)
+ }
+
+ case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val)
+ }
+ sv.ExpiredObjectDeleteMarker = xtv
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleRule
+ if *v == nil {
+ sv = &types.LifecycleRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Expiration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ExpirationStatus(xtv)
+ }
+
+ case strings.EqualFold("Transition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleRuleAndOperator
+ if *v == nil {
+ sv = &types.LifecycleRuleAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.LifecycleRuleFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.LifecycleRuleAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.LifecycleRuleFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.LifecycleRuleFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.LifecycleRule
+ if *v == nil {
+ sv = make([]types.LifecycleRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.LifecycleRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.LifecycleRule
+ if *v == nil {
+ sv = make([]types.LifecycleRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.LifecycleRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LoggingEnabled
+ if *v == nil {
+ sv = &types.LoggingEnabled{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TargetBucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TargetBucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("TargetGrants", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("TargetPrefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TargetPrefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Metrics
+ if *v == nil {
+ sv = &types.Metrics{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("EventThreshold", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.MetricsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MetricsAndOperator
+ if *v == nil {
+ sv = &types.MetricsAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MetricsConfiguration
+ if *v == nil {
+ sv = &types.MetricsConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetricsFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfigurationList(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.MetricsConfiguration
+ if *v == nil {
+ sv = make([]types.MetricsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.MetricsConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.MetricsConfiguration
+ if *v == nil {
+ sv = make([]types.MetricsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.MetricsConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.MetricsFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.MetricsAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMetricsAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.MetricsFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.MetricsFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.MetricsFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MultipartUpload
+ if *v == nil {
+ sv = &types.MultipartUpload{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Initiated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Initiated = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Initiator", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUploadList(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.MultipartUpload
+ if *v == nil {
+ sv = make([]types.MultipartUpload, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.MultipartUpload
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ var sv []types.MultipartUpload
+ if *v == nil {
+ sv = make([]types.MultipartUpload, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.MultipartUpload
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.NoncurrentVersionExpiration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoncurrentVersionExpiration
+ if *v == nil {
+ sv = &types.NoncurrentVersionExpiration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("NoncurrentDays", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NoncurrentDays = int32(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = &types.NoncurrentVersionTransition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("NoncurrentDays", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NoncurrentDays = int32(i64)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.TransitionStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransitionList(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = make([]types.NoncurrentVersionTransition, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.NoncurrentVersionTransition
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ var sv []types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = make([]types.NoncurrentVersionTransition, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.NoncurrentVersionTransition
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentNoSuchBucket(v **types.NoSuchBucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchBucket
+ if *v == nil {
+ sv = &types.NoSuchBucket{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoSuchKey(v **types.NoSuchKey, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchKey
+ if *v == nil {
+ sv = &types.NoSuchKey{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoSuchUpload(v **types.NoSuchUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchUpload
+ if *v == nil {
+ sv = &types.NoSuchUpload{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NotFound
+ if *v == nil {
+ sv = &types.NotFound{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NotificationConfigurationFilter
+ if *v == nil {
+ sv = &types.NotificationConfigurationFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3Key", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Object
+ if *v == nil {
+ sv = &types.Object{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = i64
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.ObjectStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectAlreadyInActiveTierError
+ if *v == nil {
+ sv = &types.ObjectAlreadyInActiveTierError{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Object
+ if *v == nil {
+ sv = make([]types.Object, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Object
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Object
+ if *v == nil {
+ sv = make([]types.Object, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Object
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockConfiguration
+ if *v == nil {
+ sv = &types.ObjectLockConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectLockEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv)
+ }
+
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockLegalHold
+ if *v == nil {
+ sv = &types.ObjectLockLegalHold{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ObjectLockLegalHoldStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockRetention
+ if *v == nil {
+ sv = &types.ObjectLockRetention{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Mode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Mode = types.ObjectLockRetentionMode(xtv)
+ }
+
+ case strings.EqualFold("RetainUntilDate", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.RetainUntilDate = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockRule
+ if *v == nil {
+ sv = &types.ObjectLockRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DefaultRetention", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectNotInActiveTierError
+ if *v == nil {
+ sv = &types.ObjectNotInActiveTierError{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectVersion
+ if *v == nil {
+ sv = &types.ObjectVersion{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IsLatest", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val)
+ }
+ sv.IsLatest = xtv
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = i64
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.ObjectVersionStorageClass(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ObjectVersion
+ if *v == nil {
+ sv = make([]types.ObjectVersion, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ObjectVersion
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ObjectVersion
+ if *v == nil {
+ sv = make([]types.ObjectVersion, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ObjectVersion
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Owner
+ if *v == nil {
+ sv = &types.Owner{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.OwnershipControls
+ if *v == nil {
+ sv = &types.OwnershipControls{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.OwnershipControlsRule
+ if *v == nil {
+ sv = &types.OwnershipControlsRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectOwnership", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ObjectOwnership = types.ObjectOwnership(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.OwnershipControlsRule
+ if *v == nil {
+ sv = make([]types.OwnershipControlsRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.OwnershipControlsRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.OwnershipControlsRule
+ if *v == nil {
+ sv = make([]types.OwnershipControlsRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.OwnershipControlsRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Part
+ if *v == nil {
+ sv = &types.Part{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("PartNumber", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PartNumber = int32(i64)
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = i64
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Part
+ if *v == nil {
+ sv = make([]types.Part, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Part
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Part
+ if *v == nil {
+ sv = make([]types.Part, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Part
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PolicyStatus
+ if *v == nil {
+ sv = &types.PolicyStatus{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsPublic", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val)
+ }
+ sv.IsPublic = xtv
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PublicAccessBlockConfiguration
+ if *v == nil {
+ sv = &types.PublicAccessBlockConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("BlockPublicAcls", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.BlockPublicAcls = xtv
+ }
+
+ case strings.EqualFold("BlockPublicPolicy", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.BlockPublicPolicy = xtv
+ }
+
+ case strings.EqualFold("IgnorePublicAcls", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.IgnorePublicAcls = xtv
+ }
+
+ case strings.EqualFold("RestrictPublicBuckets", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.RestrictPublicBuckets = xtv
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.QueueConfiguration
+ if *v == nil {
+ sv = &types.QueueConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Queue", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.QueueArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.QueueConfiguration
+ if *v == nil {
+ sv = make([]types.QueueConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.QueueConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.QueueConfiguration
+ if *v == nil {
+ sv = make([]types.QueueConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.QueueConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Redirect
+ if *v == nil {
+ sv = &types.Redirect{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HostName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HostName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("HttpRedirectCode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HttpRedirectCode = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Protocol", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Protocol = types.Protocol(xtv)
+ }
+
+ case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplaceKeyPrefixWith = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ReplaceKeyWith", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplaceKeyWith = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RedirectAllRequestsTo
+ if *v == nil {
+ sv = &types.RedirectAllRequestsTo{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HostName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HostName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Protocol", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Protocol = types.Protocol(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicaModifications
+ if *v == nil {
+ sv = &types.ReplicaModifications{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicaModificationsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationConfiguration
+ if *v == nil {
+ sv = &types.ReplicationConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Role", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Role = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationRule
+ if *v == nil {
+ sv = &types.ReplicationRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DeleteMarkerReplication", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ExistingObjectReplication", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Priority", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Priority = int32(i64)
+ }
+
+ case strings.EqualFold("SourceSelectionCriteria", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicationRuleStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationRuleAndOperator
+ if *v == nil {
+ sv = &types.ReplicationRuleAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.ReplicationRuleFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.ReplicationRuleAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.ReplicationRuleFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.ReplicationRuleFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ReplicationRule
+ if *v == nil {
+ sv = make([]types.ReplicationRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ReplicationRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ReplicationRule
+ if *v == nil {
+ sv = make([]types.ReplicationRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ReplicationRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationTime
+ if *v == nil {
+ sv = &types.ReplicationTime{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicationTimeStatus(xtv)
+ }
+
+ case strings.EqualFold("Time", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationTimeValue
+ if *v == nil {
+ sv = &types.ReplicationTimeValue{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Minutes", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Minutes = int32(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RoutingRule
+ if *v == nil {
+ sv = &types.RoutingRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Condition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Redirect", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.RoutingRule
+ if *v == nil {
+ sv = make([]types.RoutingRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("RoutingRule", t.Name.Local):
+ var col types.RoutingRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.RoutingRule
+ if *v == nil {
+ sv = make([]types.RoutingRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.RoutingRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.S3KeyFilter
+ if *v == nil {
+ sv = &types.S3KeyFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("FilterRule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionByDefault
+ if *v == nil {
+ sv = &types.ServerSideEncryptionByDefault{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("KMSMasterKeyID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KMSMasterKeyID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SSEAlgorithm", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SSEAlgorithm = types.ServerSideEncryption(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionConfiguration
+ if *v == nil {
+ sv = &types.ServerSideEncryptionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = &types.ServerSideEncryptionRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("BucketKeyEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val)
+ }
+ sv.BucketKeyEnabled = xtv
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = make([]types.ServerSideEncryptionRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ServerSideEncryptionRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = make([]types.ServerSideEncryptionRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ServerSideEncryptionRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SourceSelectionCriteria
+ if *v == nil {
+ sv = &types.SourceSelectionCriteria{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicaModifications", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SSEKMS
+ if *v == nil {
+ sv = &types.SSEKMS{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("KeyId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SseKmsEncryptedObjects
+ if *v == nil {
+ sv = &types.SseKmsEncryptedObjects{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.SseKmsEncryptedObjectsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SSES3
+ if *v == nil {
+ sv = &types.SSES3{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.StorageClassAnalysis
+ if *v == nil {
+ sv = &types.StorageClassAnalysis{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DataExport", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.StorageClassAnalysisDataExport
+ if *v == nil {
+ sv = &types.StorageClassAnalysisDataExport{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("OutputSchemaVersion", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Tag
+ if *v == nil {
+ sv = &types.Tag{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Value", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Value = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Tag
+ if *v == nil {
+ sv = make([]types.Tag, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Tag", t.Name.Local):
+ var col types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Tag
+ if *v == nil {
+ sv = make([]types.Tag, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Tag
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TargetGrant
+ if *v == nil {
+ sv = &types.TargetGrant{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Grantee", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Permission", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Permission = types.BucketLogsPermission(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.TargetGrant
+ if *v == nil {
+ sv = make([]types.TargetGrant, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Grant", t.Name.Local):
+ var col types.TargetGrant
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ var sv []types.TargetGrant
+ if *v == nil {
+ sv = make([]types.TargetGrant, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.TargetGrant
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Tiering
+ if *v == nil {
+ sv = &types.Tiering{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessTier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessTier = types.IntelligentTieringAccessTier(xtv)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = int32(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Tiering
+ if *v == nil {
+ sv = make([]types.Tiering, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Tiering
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Tiering
+ if *v == nil {
+ sv = make([]types.Tiering, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Tiering
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TopicConfiguration
+ if *v == nil {
+ sv = &types.TopicConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Topic", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TopicArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.TopicConfiguration
+ if *v == nil {
+ sv = make([]types.TopicConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.TopicConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.TopicConfiguration
+ if *v == nil {
+ sv = make([]types.TopicConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.TopicConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Transition
+ if *v == nil {
+ sv = &types.Transition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Date", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Date = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = int32(i64)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.TransitionStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Transition
+ if *v == nil {
+ sv = make([]types.Transition, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Transition
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Transition
+ if *v == nil {
+ sv = make([]types.Transition, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Transition
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go
new file mode 100644
index 000000000..ce3203be5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go
@@ -0,0 +1,7 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package s3 provides the API client, operations, and parameter types for Amazon
+// Simple Storage Service.
+//
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go
new file mode 100644
index 000000000..4652db06f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go
@@ -0,0 +1,160 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/url"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+ if o.EndpointResolver != nil {
+ return
+ }
+ o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom.You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), m.Options)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "s3"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolver
+ resolver EndpointResolver
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ if w.awsResolver == nil {
+ goto fallback
+ }
+ endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region)
+ if err == nil {
+ return endpoint, nil
+ }
+
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
+ return endpoint, err
+ }
+
+fallback:
+ if w.resolver == nil {
+ return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
+ }
+ return w.resolver.ResolveEndpoint(region, options)
+}
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint
+// resolution to the awsResolver. If awsResolver returns aws.EndpointNotFoundError
+// error, the resolver will use the the provided fallbackResolver for resolution.
+// awsResolver and fallbackResolver must not be nil
+func withEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver {
+ return &wrappedEndpointResolver{
+ awsResolver: awsResolver,
+ resolver: fallbackResolver,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.mod
new file mode 100644
index 000000000..2dcb322ed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.mod
@@ -0,0 +1,20 @@
+module github.com/aws/aws-sdk-go-v2/service/s3
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.2
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.1
+ github.com/aws/smithy-go v1.2.0
+ github.com/google/go-cmp v0.5.4
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding => ../../service/internal/accept-encoding/
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => ../../service/internal/presigned-url/
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/s3shared => ../../service/internal/s3shared/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
new file mode 100644
index 000000000..dfe5c70f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
@@ -0,0 +1,70 @@
+package arn
+
+import (
+ "strings"
+
+ awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource.
+func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) {
+ return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+ resParts := arn.SplitResource(a.Resource)
+ switch resParts[0] {
+ case "accesspoint":
+ if a.Service != "s3" {
+ return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"}
+ }
+ return arn.ParseAccessPointResource(a, resParts[1:])
+ case "outpost":
+ if a.Service != "s3-outposts" {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+ }
+ return parseOutpostAccessPointResource(a, resParts[1:])
+ default:
+ return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+ }
+}
+
+func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
+ // outpost accesspoint arn is only valid if service is s3-outposts
+ if a.Service != "s3-outposts" {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+ }
+
+ if len(resParts) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ if len(resParts) < 3 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
+ ARN: a, Reason: "access-point resource not set in Outpost ARN",
+ }
+ }
+
+ resID := strings.TrimSpace(resParts[0])
+ if len(resID) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ var outpostAccessPointARN = arn.OutpostAccessPointARN{}
+ switch resParts[1] {
+ case "accesspoint":
+ accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return arn.OutpostAccessPointARN{}, err
+ }
+ // set access-point arn
+ outpostAccessPointARN.AccessPointARN = accessPointARN
+ default:
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
+ }
+
+ // set outpost id
+ outpostAccessPointARN.OutpostID = resID
+ return outpostAccessPointARN, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
new file mode 100644
index 000000000..2502b39fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
@@ -0,0 +1,87 @@
+/*
+Package customizations provides customizations for the Amazon S3 API client.
+
+This package provides support for following S3 customizations
+
+ ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type
+
+ UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options
+
+ RemoveBucket Middleware: removes a serialized bucket name from request url path
+
+ processResponseWith200Error Middleware: Deserializing response error with 200 status code
+
+
+Virtual Host style url addressing
+
+Since serializers serialize by default as path style url, we use customization
+to modify the endpoint url when `UsePathStyle` option on S3Client is unset or
+false. This flag will be ignored if `UseAccelerate` option is set to true.
+
+If UseAccelerate is not enabled, and the bucket name is not a valid hostname
+label, they SDK will fallback to forcing the request to be made as if
+UsePathStyle was enabled. This behavior is also used if UseDualStack is enabled.
+
+https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description
+
+
+Transfer acceleration
+
+By default S3 Transfer acceleration support is disabled. By enabling `UseAccelerate`
+option on S3Client, one can enable s3 transfer acceleration support. Transfer
+acceleration only works with Virtual Host style addressing, and thus `UsePathStyle`
+option if set is ignored. Transfer acceleration is not supported for S3 operations
+DeleteBucket, ListBuckets, and CreateBucket.
+
+
+Dualstack support
+
+By default dualstack support for s3 client is disabled. By enabling `UseDualstack`
+option on s3 client, you can enable dualstack endpoint support.
+
+
+Endpoint customizations
+
+
+Customizations to lookup ARN, process ARN needs to happen before request serialization.
+UpdateEndpoint middleware which mutates resources based on Options such as
+UseDualstack, UseAccelerate for modifying resolved endpoint are executed after
+request serialization. Remove bucket middleware is executed after
+an request is serialized, and removes the serialized bucket name from request path
+
+ Middleware layering:
+
+
+ Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step
+
+ Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware
+
+
+Customization options:
+ UseARNRegion (Disabled by Default)
+
+ UsePathStyle (Disabled by Default)
+
+ UseAccelerate (Disabled by Default)
+
+ UseDualstack (Disabled by Default)
+
+
+Handle Error response with 200 status code
+
+S3 operations: CopyObject, CompleteMultipartUpload, UploadPartCopy can have an
+error Response with status code 2xx. The processResponseWith200Error middleware
+customizations enables SDK to check for an error within response body prior to
+deserialization.
+
+As the check for 2xx response containing an error needs to be performed earlier
+than response deserialization. Since the behavior of Deserialization is in
+reverse order to the other stack steps its easier to consider that "after" means
+"before".
+
+ Middleware layering:
+
+ HTTP Response -> handle 200 error customization -> deserialize
+
+*/
+package customizations
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go
new file mode 100644
index 000000000..2b11b1fa2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go
@@ -0,0 +1,74 @@
+package customizations
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+
+ "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HandleResponseErrorWith200Status check for S3 200 error response.
+// If an s3 200 error is found, status code for the response is modified temporarily to
+// 5xx response status code.
+func HandleResponseErrorWith200Status(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After)
+}
+
+// middleware to process raw response and look for error response with 200 status code
+type processResponseFor200ErrorMiddleware struct{}
+
+// ID returns the middleware ID.
+func (*processResponseFor200ErrorMiddleware) ID() string {
+ return "S3:ProcessResponseFor200Error"
+}
+
+func (m *processResponseFor200ErrorMiddleware) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ // check if response status code is 2xx.
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return
+ }
+
+ var readBuff bytes.Buffer
+ body := io.TeeReader(response.Body, &readBuff)
+
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("received empty response payload"),
+ }
+ }
+
+ // rewind response body
+ response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body))
+
+ // if start tag is "Error", the response is consider error response.
+ if strings.EqualFold(t.Name.Local, "Error") {
+ // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/
+ // 200 error responses are similar to 5xx errors.
+ response.StatusCode = 500
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
new file mode 100644
index 000000000..f4bbb4b6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
@@ -0,0 +1,49 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddExpiresOnPresignedURL represents a build middleware used to assign
+// expiration on a presigned URL.
+type AddExpiresOnPresignedURL struct {
+
+ // Expires is time.Duration within which presigned url should be expired.
+ // This should be the duration in seconds the presigned URL should be considered valid for.
+ // By default the S3 presigned url expires in 15 minutes ie. 900 seconds.
+ Expires time.Duration
+}
+
+// ID representing the middleware
+func (*AddExpiresOnPresignedURL) ID() string {
+ return "S3:AddExpiresOnPresignedURL"
+}
+
+// HandleBuild handles the build step middleware behavior
+func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ // if expiration is unset skip this middleware
+ if m.Expires == 0 {
+ // default to 15 * time.Minutes
+ m.Expires = 15 * time.Minute
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", req)
+ }
+
+ // set S3 X-AMZ-Expires header
+ query := req.URL.Query()
+ query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10))
+ req.URL.RawQuery = query.Encode()
+
+ return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
new file mode 100644
index 000000000..9a40509a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
@@ -0,0 +1,348 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+ s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn"
+)
+
+// processARNResource is used to process an ARN resource.
+type processARNResource struct {
+
+ // UseARNRegion indicates if region parsed from an ARN should be used.
+ UseARNRegion bool
+
+ // UseAccelerate indicates if s3 transfer acceleration is enabled
+ UseAccelerate bool
+
+ // UseDualstack instructs if s3 dualstack endpoint config is enabled
+ UseDualstack bool
+
+ // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+ EndpointResolver EndpointResolver
+
+ // EndpointResolverOptions used by endpoint resolver
+ EndpointResolverOptions EndpointResolverOptions
+}
+
+// ID returns the middleware ID.
+func (*processARNResource) ID() string { return "S3:ProcessARNResource" }
+
+func (m *processARNResource) HandleSerialize( // HandleSerialize rewrites the request endpoint when the bucket field carried an access-point or outpost ARN; otherwise it is a pass-through.
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ // check if arn was provided, if not skip this middleware
+ arnValue, ok := s3shared.GetARNResourceFromContext(ctx)
+ if !ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req) // NOTE(review): %T of req (typed nil) reports the expected type, not the actual — upstream quirk
+ }
+
+ // parse arn into an endpoint arn wrt to service
+ resource, err := s3arn.ParseEndpointARN(arnValue)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ // build a resource request struct
+ resourceRequest := s3shared.ResourceRequest{
+ Resource: resource,
+ UseARNRegion: m.UseARNRegion,
+ RequestRegion: awsmiddleware.GetRegion(ctx),
+ SigningRegion: awsmiddleware.GetSigningRegion(ctx),
+ PartitionID: awsmiddleware.GetPartitionID(ctx),
+ }
+
+ // validate resource request
+ if err := validateResourceRequest(resourceRequest); err != nil {
+ return out, metadata, err
+ }
+
+ // switch to correct endpoint updater
+ switch tv := resource.(type) {
+ case arn.AccessPointARN:
+ // check if accelerate
+ if m.UseAccelerate {
+ return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // fetch arn region to resolve request
+ resolveRegion := tv.Region
+ // check if request region is FIPS
+ if resourceRequest.UseFips() {
+ // if use arn region is enabled and request signing region is not same as arn region
+ if m.UseARNRegion && resourceRequest.IsCrossRegion() {
+ // FIPS with cross region is not supported, the SDK must fail
+ // because there is no well defined method for SDK to construct a
+ // correct FIPS endpoint.
+ return out, metadata,
+ s3shared.NewClientConfiguredForCrossRegionFIPSError(
+ tv,
+ resourceRequest.PartitionID,
+ resourceRequest.RequestRegion,
+ nil,
+ )
+ }
+
+ // for FIPS, resolve with the request region (NOTE(review): also reached when UseARNRegion is set and the regions already match, which is harmless)
+ resolveRegion = resourceRequest.RequestRegion
+ }
+
+ // build access point request
+ ctx, err = buildAccessPointRequest(ctx, accesspointOptions{
+ processARNResource: *m,
+ request: req,
+ resource: tv,
+ resolveRegion: resolveRegion,
+ partitionID: resourceRequest.PartitionID,
+ requestRegion: resourceRequest.RequestRegion,
+ })
+ if err != nil {
+ return out, metadata, err
+ }
+
+ // process outpost accesspoint ARN
+ case arn.OutpostAccessPointARN:
+ // check if accelerate
+ if m.UseAccelerate {
+ return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if dual stack
+ if m.UseDualstack {
+ return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if resource arn region is FIPS
+ if resourceRequest.ResourceConfiguredForFIPS() {
+ return out, metadata, s3shared.NewInvalidARNWithFIPSError(tv, nil)
+ }
+
+ // build outpost access point request
+ ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{
+ processARNResource: *m,
+ resource: tv,
+ request: req,
+ partitionID: resourceRequest.PartitionID,
+ requestRegion: resourceRequest.RequestRegion,
+ })
+ if err != nil {
+ return out, metadata, err
+ }
+
+ default:
+ return out, metadata, s3shared.NewInvalidARNError(resource, nil)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// validate if s3 resource and request config is compatible.
+func validateResourceRequest(resourceRequest s3shared.ResourceRequest) error {
+ // check if resourceRequest leads to a cross partition error
+ v, err := resourceRequest.IsCrossPartition()
+ if err != nil {
+ return err
+ }
+ if v {
+ // cross-partition access via ARN is never allowed
+ return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if resourceRequest leads to a cross region error
+ if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() {
+ // cross-region access is rejected unless UseARNRegion is enabled
+ return s3shared.NewClientRegionMismatchError(resourceRequest.Resource,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ return nil
+}
+
+// === Accesspoint ==========
+
+type accesspointOptions struct {
+ processARNResource
+ request *http.Request
+ resource arn.AccessPointARN
+ resolveRegion string
+ partitionID string
+ requestRegion string
+}
+
+func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { // rewrites req to the s3-accesspoint virtual host for the given access-point ARN and updates signing name/region on ctx
+ tv := options.resource
+ req := options.request
+ resolveRegion := options.resolveRegion
+
+ resolveService := tv.Service
+
+ // resolve endpoint
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, options.EndpointResolverOptions)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(endpoint.SigningName) != 0 {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService) // fall back to the service parsed from the ARN
+ }
+
+ if len(endpoint.SigningRegion) != 0 {
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+ }
+
+ // skip arn processing, if arn region resolves to a immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ const serviceEndpointLabel = "s3-accesspoint"
+ cfgHost := req.URL.Host
+ if strings.HasPrefix(cfgHost, "s3") {
+ // replace service hostlabel "s3" to "s3-accesspoint"
+ req.URL.Host = serviceEndpointLabel + cfgHost[len("s3"):]
+
+ // update serviceID to "s3-accesspoint"
+ ctx = awsmiddleware.SetServiceID(ctx, serviceEndpointLabel)
+ }
+
+ // add host prefix for s3-accesspoint
+ accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "."
+ req.URL.Host = accessPointHostPrefix + req.URL.Host
+ if len(req.Host) > 0 {
+ req.Host = accessPointHostPrefix + req.Host // keep explicit Host header (if any) in sync with URL.Host
+ }
+
+ // validate the endpoint host
+ if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+ return ctx, s3shared.NewInvalidARNError(tv, err)
+ }
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+ return ctx, nil
+}
+
+// ====== Outpost Accesspoint ========
+
+type outpostAccessPointOptions struct {
+ processARNResource
+ request *http.Request
+ resource arn.OutpostAccessPointARN
+ partitionID string
+ requestRegion string
+}
+
+func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) { // rewrites req to the s3-outposts virtual host for the given outpost access-point ARN; mirrors buildAccessPointRequest
+ tv := options.resource
+ req := options.request
+
+ resolveRegion := tv.Region
+ resolveService := tv.Service
+ endpointsID := resolveService
+ if strings.EqualFold(resolveService, "s3-outposts") {
+ // assign endpoints ID as "S3"
+ endpointsID = "s3" // outposts endpoints are resolved via the plain s3 endpoint metadata
+ }
+
+ // resolve regional endpoint for resolved region.
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, options.EndpointResolverOptions)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(endpoint.SigningName) != 0 {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ // assign resolved service from arn as signing name
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+ }
+
+ if len(endpoint.SigningRegion) != 0 {
+ // redirect signer to use resolved endpoint signing name and region
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+ }
+
+ // skip further customizations, if arn region resolves to a immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ cfgHost := req.URL.Host
+ if strings.HasPrefix(cfgHost, endpointsID) {
+ // replace service endpointID label with resolved service
+ req.URL.Host = resolveService + cfgHost[len(endpointsID):]
+ // update serviceID to resolved service id
+ ctx = awsmiddleware.SetServiceID(ctx, resolveService)
+ }
+
+ // add host prefix for s3-outposts
+ outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "."
+ req.URL.Host = outpostAPHostPrefix + req.URL.Host
+ if len(req.Host) > 0 {
+ req.Host = outpostAPHostPrefix + req.Host // keep explicit Host header (if any) in sync with URL.Host
+ }
+
+ // validate the endpoint host
+ if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+ return ctx, s3shared.NewInvalidARNError(tv, err)
+ }
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+ return ctx, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
new file mode 100644
index 000000000..2e030f29c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
@@ -0,0 +1,58 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+)
+
+// removeBucketFromPathMiddleware needs to be executed after serialize step is performed
+type removeBucketFromPathMiddleware struct {
+}
+
+func (m *removeBucketFromPathMiddleware) ID() string { // ID returns the middleware identifier.
+ return "S3:RemoveBucketFromPathMiddleware"
+}
+
+func (m *removeBucketFromPathMiddleware) HandleSerialize( // strips the serialized /{Bucket} segment when an earlier middleware flagged it for removal on ctx
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ // check if a bucket removal from HTTP path is required
+ bucket, ok := getRemoveBucketFromPath(ctx)
+ if !ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req) // NOTE(review): %T of req (typed nil) reports the expected type — upstream quirk
+ }
+
+ removeBucketFromPath(req.URL, bucket)
+ return next.HandleSerialize(ctx, in)
+}
+
+type removeBucketKey struct { // unexported ctx key; the field keeps distinct keys per bucket value
+ bucket string
+}
+
+// setBucketToRemoveOnContext sets the bucket name to be removed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context {
+ return middleware.WithStackValue(ctx, removeBucketKey{}, bucket)
+}
+
+// getRemoveBucketFromPath returns the bucket name to remove from the path.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func getRemoveBucketFromPath(ctx context.Context) (string, bool) {
+ v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string)
+ return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
new file mode 100644
index 000000000..1e10c19ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
@@ -0,0 +1,296 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "log"
+ "net/url"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+)
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// UpdateEndpointParameterAccessor represents accessor functions used by the middleware
+type UpdateEndpointParameterAccessor struct {
+ // functional pointer to fetch bucket name from provided input.
+ // The function is intended to take an input value, and
+ // return a string pointer to value of string, and bool if
+ // input has no bucket member.
+ GetBucketFromInput func(interface{}) (*string, bool)
+}
+
+// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup.
+type UpdateEndpointOptions struct {
+
+ // Accessor are parameter accessors used by the middleware
+ Accessor UpdateEndpointParameterAccessor
+
+ // use path style
+ UsePathStyle bool
+
+ // use transfer acceleration
+ UseAccelerate bool
+
+ // indicates if an operation supports s3 transfer acceleration.
+ SupportsAccelerate bool
+
+ // use dualstack
+ UseDualstack bool
+
+ // use ARN region
+ UseARNRegion bool
+
+ // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+ EndpointResolver EndpointResolver
+
+ // EndpointResolverOptions used by endpoint resolver
+ EndpointResolverOptions EndpointResolverOptions
+}
+
+// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions.
+func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) {
+ // initial arn look up middleware
+ err = stack.Initialize.Add(&s3shared.ARNLookup{
+ GetARNValue: options.Accessor.GetBucketFromInput,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // process arn
+ err = stack.Serialize.Insert(&processARNResource{
+ UseARNRegion: options.UseARNRegion,
+ UseAccelerate: options.UseAccelerate,
+ UseDualstack: options.UseDualstack,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointResolverOptions,
+ }, "OperationSerializer", middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // remove bucket arn middleware; runs after the serializer so the /{Bucket} segment exists to strip
+ err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, "OperationSerializer", middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // enable dual stack support
+ err = stack.Serialize.Insert(&s3shared.EnableDualstack{
+ UseDualstack: options.UseDualstack,
+ DefaultServiceID: "s3",
+ }, "OperationSerializer", middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // update endpoint to use options for path style and accelerate
+ err = stack.Serialize.Insert(&updateEndpoint{
+ usePathStyle: options.UsePathStyle,
+ getBucketFromInput: options.Accessor.GetBucketFromInput,
+ useAccelerate: options.UseAccelerate,
+ supportsAccelerate: options.SupportsAccelerate,
+ }, (*s3shared.EnableDualstack)(nil).ID(), middleware.After) // nil-receiver ID() call is safe: ID has no receiver dereference
+ if err != nil {
+ return err
+ }
+
+ return err
+}
+
+type updateEndpoint struct {
+ // path style options
+ usePathStyle bool
+ getBucketFromInput func(interface{}) (*string, bool)
+
+ // accelerate options
+ useAccelerate bool
+ supportsAccelerate bool
+}
+
+// ID returns the middleware ID.
+func (*updateEndpoint) ID() string {
+ return "S3:UpdateEndpoint"
+}
+
+func (u *updateEndpoint) HandleSerialize( // applies virtual-host / accelerate endpoint styling when no ARN was processed
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ // if arn was processed, skip this middleware
+ if _, ok := s3shared.GetARNResourceFromContext(ctx); ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // skip this customization if host name is set as immutable
+ if smithyhttp.GetHostnameImmutable(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req) // NOTE(review): %T of req (typed nil) reports the expected type — upstream quirk
+ }
+
+ // check if accelerate is supported
+ if u.useAccelerate && !u.supportsAccelerate {
+ // accelerate is not supported, thus will be ignored
+ log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.")
+ u.useAccelerate = false // NOTE(review): mutates the shared middleware instance (pointer receiver) — vendored upstream behavior, do not change
+ }
+
+ // transfer acceleration is not supported with path style urls
+ if u.useAccelerate && u.usePathStyle {
+ log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.")
+ u.usePathStyle = false
+ }
+
+ if u.getBucketFromInput != nil {
+ // Below customization only apply if bucket name is provided
+ bucket, ok := u.getBucketFromInput(in.Parameters)
+ if ok && bucket != nil {
+ region := awsmiddleware.GetRegion(ctx)
+ if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil {
+ return out, metadata, err
+ }
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error { // moves bucket into the host (virtual-host style) and applies the s3-accelerate host label when enabled; value receiver, so u is a copy here
+ // do nothing if path style is enforced
+ if u.usePathStyle {
+ return nil
+ }
+
+ if !hostCompatibleBucketName(req.URL, bucket) {
+ // bucket name must be valid to put into the host for accelerate operations.
+ // For non-accelerate operations the bucket name can stay in the path if
+ // not valid hostname.
+ var err error
+ if u.useAccelerate {
+ err = fmt.Errorf("bucket name %s is not compatible with S3", bucket)
+ }
+
+ // No-Op if not using accelerate.
+ return err
+ }
+
+ // accelerate is only supported if use path style is disabled
+ if u.useAccelerate {
+ parts := strings.Split(req.URL.Host, ".")
+ if len(parts) < 3 {
+ return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host)
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+
+ // drop the first host label that equals the region (accelerate endpoints are region-less)
+ for i := 1; i+1 < len(parts); i++ {
+ if strings.EqualFold(parts[i], region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ // construct the url host
+ req.URL.Host = strings.Join(parts, ".")
+ }
+
+ // move bucket to follow virtual host style
+ moveBucketNameToHost(req.URL, bucket)
+ return nil
+}
+
+// updates endpoint to use virtual host styling
+func moveBucketNameToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ removeBucketFromPath(u, bucket)
+}
+
+// remove bucket from url
+func removeBucketFromPath(u *url.URL, bucket string) {
+ if strings.HasPrefix(u.Path, "/"+bucket) {
+ // modify url path
+ u.Path = strings.Replace(u.Path, "/"+bucket, "", 1)
+
+ // modify url raw path; RawPath holds the percent-escaped form, so escape the bucket the same way the serializer did
+ u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1)
+ }
+
+ if u.Path == "" {
+ u.Path = "/"
+ }
+
+ if u.RawPath == "" {
+ u.RawPath = "/" // NOTE(review): sets RawPath even when Path is non-trivial; net/url ignores RawPath unless it is a valid encoding of Path — upstream behavior
+ }
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ if strings.Contains(bucket, "..") {
+ return false
+ }
+
+ // checks for `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping
+ if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) { // NOTE(review): indexes bucket[0] — panics on empty string; callers appear to pass non-empty buckets (TODO confirm). Length bounds from the regex are not enforced here — upstream behavior
+ return false
+ }
+
+ for _, c := range bucket[1:] { // remaining chars must be [a-z0-9.-] (byte ranges: 96<c<123 lowercase, 47<c<58 digit, 46 '.', 45 '-')
+ if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) {
+ return false
+ }
+ }
+
+ // checks for `^(\d+\.){3}\d+$` IPaddressing
+ v := strings.SplitN(bucket, ".", -1)
+ if len(v) == 4 {
+ for _, c := range bucket {
+ if !((c > 47 && c < 58) || c == 46) {
+ // we confirm that this is not a IP address
+ return true
+ }
+ }
+ // this is a IP address
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..cfabf191c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -0,0 +1,180 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ DisableHTTPS bool
+}
+
+// Resolver S3 endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := endpoints.Options{
+ DisableHTTPS: options.DisableHTTPS,
+ }
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions, // shared read-only partition table; not copied
+ }
+}
+
+var defaultPartitions = endpoints.Partitions{ // generated endpoint metadata (smithy-go-codegen); regenerate rather than hand-edit
+ {
+ ID: "aws",
+ Defaults: endpoints.Endpoint{
+ Hostname: "s3.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ RegionRegex: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "af-south-1": endpoints.Endpoint{},
+ "ap-east-1": endpoints.Endpoint{},
+ "ap-northeast-1": endpoints.Endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoints.Endpoint{},
+ "ap-south-1": endpoints.Endpoint{},
+ "ap-southeast-1": endpoints.Endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoints.Endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "aws-global": endpoints.Endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "ca-central-1": endpoints.Endpoint{},
+ "eu-central-1": endpoints.Endpoint{},
+ "eu-north-1": endpoints.Endpoint{},
+ "eu-south-1": endpoints.Endpoint{},
+ "eu-west-1": endpoints.Endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoints.Endpoint{},
+ "eu-west-3": endpoints.Endpoint{},
+ "me-south-1": endpoints.Endpoint{},
+ "s3-external-1": endpoints.Endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoints.Endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoints.Endpoint{
+ Hostname: "s3.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoints.Endpoint{},
+ "us-west-1": endpoints.Endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoints.Endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: endpoints.Endpoint{
+ Hostname: "s3.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ RegionRegex: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "cn-north-1": endpoints.Endpoint{},
+ "cn-northwest-1": endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso",
+ Defaults: endpoints.Endpoint{
+ Hostname: "s3.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "us-iso-east-1": endpoints.Endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: endpoints.Endpoint{
+ Hostname: "s3.{region}.sc2s.sgov.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "us-isob-east-1": endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: endpoints.Endpoint{
+ Hostname: "s3.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "fips-us-gov-west-1": endpoints.Endpoint{
+ Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoints.Endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoints.Endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
new file mode 100644
index 000000000..6382025a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
@@ -0,0 +1,12112 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "strings"
+)
+
+type awsRestxml_serializeOpAbortMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AbortMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=AbortMultipartUpload")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCompleteMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CompleteMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "POST"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.MultipartUpload != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CompleteMultipartUpload",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCopyObject struct {
+}
+
+func (*awsRestxml_serializeOpCopyObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CopyObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CopyObject")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.BucketKeyEnabled {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil && len(*v.CacheControl) > 0 {
+ locationName := "Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 {
+ locationName := "Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 {
+ locationName := "Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 {
+ locationName := "Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentType != nil && len(*v.ContentType) > 0 {
+ locationName := "Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.CopySource != nil && len(*v.CopySource) > 0 {
+ locationName := "X-Amz-Copy-Source"
+ encoder.SetHeader(locationName).String(*v.CopySource)
+ }
+
+ if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 {
+ locationName := "X-Amz-Copy-Source-If-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfMatch)
+ }
+
+ if v.CopySourceIfModifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince))
+ }
+
+ if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 {
+ locationName := "X-Amz-Copy-Source-If-None-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch)
+ }
+
+ if v.CopySourceIfUnmodifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince))
+ }
+
+ if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm)
+ }
+
+ if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey)
+ }
+
+ if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 {
+ locationName := "X-Amz-Source-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner)
+ }
+
+ if v.Expires != nil {
+ locationName := "Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ if len(mapVal) > 0 {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+ }
+
+ if len(v.MetadataDirective) > 0 {
+ locationName := "X-Amz-Metadata-Directive"
+ encoder.SetHeader(locationName).String(string(v.MetadataDirective))
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.Tagging != nil && len(*v.Tagging) > 0 {
+ locationName := "X-Amz-Tagging"
+ encoder.SetHeader(locationName).String(*v.Tagging)
+ }
+
+ if len(v.TaggingDirective) > 0 {
+ locationName := "X-Amz-Tagging-Directive"
+ encoder.SetHeader(locationName).String(string(v.TaggingDirective))
+ }
+
+ if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 {
+ locationName := "X-Amz-Website-Redirect-Location"
+ encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateBucket struct {
+}
+
+func (*awsRestxml_serializeOpCreateBucket) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateBucketInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.CreateBucketConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CreateBucketConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWrite != nil && len(*v.GrantWrite) > 0 {
+ locationName := "X-Amz-Grant-Write"
+ encoder.SetHeader(locationName).String(*v.GrantWrite)
+ }
+
+ if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.ObjectLockEnabledForBucket {
+ locationName := "X-Amz-Bucket-Object-Lock-Enabled"
+ encoder.SetHeader(locationName).Boolean(v.ObjectLockEnabledForBucket)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?uploads")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "POST"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.BucketKeyEnabled {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil && len(*v.CacheControl) > 0 {
+ locationName := "Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 {
+ locationName := "Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 {
+ locationName := "Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 {
+ locationName := "Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentType != nil && len(*v.ContentType) > 0 {
+ locationName := "Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Expires != nil {
+ locationName := "Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ if len(mapVal) > 0 {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.Tagging != nil && len(*v.Tagging) > 0 {
+ locationName := "X-Amz-Tagging"
+ encoder.SetHeader(locationName).String(*v.Tagging)
+ }
+
+ if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 {
+ locationName := "X-Amz-Website-Redirect-Location"
+ encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucket struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucket) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketLifecycle struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketLifecycleInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketPolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketReplicationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketWebsite struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketWebsiteInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObject struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=DeleteObject")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.BypassGovernanceRetention {
+ locationName := "X-Amz-Bypass-Governance-Retention"
+ encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MFA != nil && len(*v.MFA) > 0 {
+ locationName := "X-Amz-Mfa"
+ encoder.SetHeader(locationName).String(*v.MFA)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObjects struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObjects) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?delete")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "POST"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Delete != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Delete",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.BypassGovernanceRetention {
+ locationName := "X-Amz-Bypass-Governance-Retention"
+ encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.MFA != nil && len(*v.MFA) > 0 {
+ locationName := "X-Amz-Mfa"
+ encoder.SetHeader(locationName).String(*v.MFA)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObjectTagging struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeletePublicAccessBlock struct {
+}
+
+func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeletePublicAccessBlockInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "DELETE"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAcl struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=GetBucketAnalyticsConfiguration")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=GetBucketInventoryConfiguration")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLocation struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLocation) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLocationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?location")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLogging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLogging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLoggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=GetBucketMetricsConfiguration")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketNotificationConfiguration serializes a
+// GetBucketNotificationConfiguration call as GET /{Bucket}?notification.
+// NOTE(review): this looks like smithy-go generated code — confirm before hand-editing.
+type awsRestxml_serializeOpGetBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize splits the operation URI into path and query, sets the GET
+// method, applies the input's HTTP bindings, and re-encodes the request
+// before delegating to the next handler in the middleware stack.
+func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput encodes
+// the required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketOwnershipControls serializes a
+// GetBucketOwnershipControls call as GET /{Bucket}?ownershipControls.
+type awsRestxml_serializeOpGetBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketOwnershipControlsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketPolicy serializes a GetBucketPolicy call as
+// GET /{Bucket}?policy.
+type awsRestxml_serializeOpGetBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicy) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketPolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput encodes the required
+// Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketPolicyStatus serializes a GetBucketPolicyStatus
+// call as GET /{Bucket}?policyStatus.
+type awsRestxml_serializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketPolicyStatusInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policyStatus")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketReplication serializes a GetBucketReplication
+// call as GET /{Bucket}?replication.
+type awsRestxml_serializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketReplication) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketReplicationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketRequestPayment serializes a
+// GetBucketRequestPayment call as GET /{Bucket}?requestPayment.
+type awsRestxml_serializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketRequestPaymentInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketTagging serializes a GetBucketTagging call as
+// GET /{Bucket}?tagging.
+type awsRestxml_serializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketTagging) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput encodes the required
+// Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketVersioning serializes a GetBucketVersioning
+// call as GET /{Bucket}?versioning.
+type awsRestxml_serializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketVersioning) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketVersioningInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetBucketWebsite serializes a GetBucketWebsite call as
+// GET /{Bucket}?website.
+type awsRestxml_serializeOpGetBucketWebsite struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketWebsite) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketWebsiteInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput encodes the required
+// Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObject serializes a GetObject call as
+// GET /{Bucket}/{Key+}?x-id=GetObject.
+type awsRestxml_serializeOpGetObject struct {
+}
+
+func (*awsRestxml_serializeOpGetObject) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=GetObject")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectInput encodes the GetObject input
+// members onto the request: required Bucket and Key URI labels; conditional
+// request headers (If-Match, If-Modified-Since, If-None-Match,
+// If-Unmodified-Since), Range, RequestPayer, and SSE-C headers; and the
+// partNumber, response-* override, and versionId query parameters.
+func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatch != nil && len(*v.IfMatch) > 0 {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfModifiedSince != nil {
+ locationName := "If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
+ }
+
+ if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 {
+ locationName := "If-None-Match"
+ encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+ }
+
+ if v.IfUnmodifiedSince != nil {
+ locationName := "If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince))
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.PartNumber != 0 {
+ encoder.SetQuery("partNumber").Integer(v.PartNumber)
+ }
+
+ if v.Range != nil && len(*v.Range) > 0 {
+ locationName := "Range"
+ encoder.SetHeader(locationName).String(*v.Range)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.ResponseCacheControl != nil {
+ encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl)
+ }
+
+ if v.ResponseContentDisposition != nil {
+ encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition)
+ }
+
+ if v.ResponseContentEncoding != nil {
+ encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding)
+ }
+
+ if v.ResponseContentLanguage != nil {
+ encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage)
+ }
+
+ if v.ResponseContentType != nil {
+ encoder.SetQuery("response-content-type").String(*v.ResponseContentType)
+ }
+
+ if v.ResponseExpires != nil {
+ encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires))
+ }
+
+ if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObjectAcl serializes a GetObjectAcl call as
+// GET /{Bucket}/{Key+}?acl.
+type awsRestxml_serializeOpGetObjectAcl struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectAcl) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectAclInput encodes the required
+// Bucket and Key URI labels, the optional X-Amz-Expected-Bucket-Owner and
+// X-Amz-Request-Payer headers, and the versionId query parameter.
+func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObjectLegalHold serializes a GetObjectLegalHold
+// call as GET /{Bucket}/{Key+}?legal-hold.
+type awsRestxml_serializeOpGetObjectLegalHold struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectLegalHoldInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput encodes the required
+// Bucket and Key URI labels, the optional X-Amz-Expected-Bucket-Owner and
+// X-Amz-Request-Payer headers, and the versionId query parameter.
+func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObjectLockConfiguration serializes a
+// GetObjectLockConfiguration call as GET /{Bucket}?object-lock.
+type awsRestxml_serializeOpGetObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectLockConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput encodes the
+// required Bucket URI label and the optional X-Amz-Expected-Bucket-Owner header.
+func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObjectRetention serializes a GetObjectRetention
+// call as GET /{Bucket}/{Key+}?retention.
+type awsRestxml_serializeOpGetObjectRetention struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectRetention) ID() string {
+ return "OperationSerializer"
+}
+
+// HandleSerialize builds the GET request for this operation, applies the
+// input's HTTP bindings, and hands off to the next middleware handler.
+func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectRetentionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+// awsRestxml_serializeOpGetObjectTagging is the serialize-step middleware for
+// the S3 GetObjectTagging operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpGetObjectTagging struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpGetObjectTagging) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds GetObjectTaggingInput onto the outgoing HTTP request
+// (path /{Bucket}/{Key+}?tagging, method GET) and forwards to the next handler.
+func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectTaggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput encodes the HTTP-bound
+// members of GetObjectTaggingInput: Bucket and Key into the URI,
+// ExpectedBucketOwner into a header, VersionId into the query.
+func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket and Key are required URI labels; empty values are rejected.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpGetObjectTorrent is the serialize-step middleware for
+// the S3 GetObjectTorrent operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpGetObjectTorrent struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpGetObjectTorrent) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds GetObjectTorrentInput onto the outgoing HTTP request
+// (path /{Bucket}/{Key+}?torrent, method GET) and forwards to the next handler.
+func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectTorrentInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?torrent")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput encodes the HTTP-bound
+// members of GetObjectTorrentInput: Bucket and Key into the URI,
+// ExpectedBucketOwner and RequestPayer into headers.
+func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket and Key are required URI labels; empty values are rejected.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpGetPublicAccessBlock is the serialize-step middleware
+// for the S3 GetPublicAccessBlock operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpGetPublicAccessBlock struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds GetPublicAccessBlockInput onto the outgoing HTTP request
+// (path /{Bucket}?publicAccessBlock, method GET) and forwards to the next handler.
+func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetPublicAccessBlockInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput encodes the
+// HTTP-bound members of GetPublicAccessBlockInput: Bucket into the URI path and
+// ExpectedBucketOwner into a request header.
+func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpHeadBucket is the serialize-step middleware for the S3
+// HeadBucket operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpHeadBucket struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpHeadBucket) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds HeadBucketInput onto the outgoing HTTP request
+// (path /{Bucket}, method HEAD) and forwards to the next handler.
+func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*HeadBucketInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "HEAD"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsHeadBucketInput encodes the HTTP-bound
+// members of HeadBucketInput: Bucket into the URI path and ExpectedBucketOwner
+// into a request header.
+func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpHeadObject is the serialize-step middleware for the S3
+// HeadObject operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpHeadObject struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpHeadObject) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds HeadObjectInput onto the outgoing HTTP request
+// (path /{Bucket}/{Key+}, method HEAD) and forwards to the next handler.
+func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*HeadObjectInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "HEAD"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsHeadObjectInput encodes the HTTP-bound
+// members of HeadObjectInput: Bucket/Key into the URI, the conditional and
+// SSE-C members into headers, and partNumber/versionId into the query string.
+func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket and Key are required URI labels; empty values are rejected.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.IfMatch != nil && len(*v.IfMatch) > 0 {
+		locationName := "If-Match"
+		encoder.SetHeader(locationName).String(*v.IfMatch)
+	}
+
+	// Timestamp headers use the HTTP-date format (RFC 7231).
+	if v.IfModifiedSince != nil {
+		locationName := "If-Modified-Since"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
+	}
+
+	if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 {
+		locationName := "If-None-Match"
+		encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+	}
+
+	if v.IfUnmodifiedSince != nil {
+		locationName := "If-Unmodified-Since"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince))
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	// PartNumber is a non-pointer int32 here; the zero value means "unset".
+	if v.PartNumber != 0 {
+		encoder.SetQuery("partNumber").Integer(v.PartNumber)
+	}
+
+	if v.Range != nil && len(*v.Range) > 0 {
+		locationName := "Range"
+		encoder.SetHeader(locationName).String(*v.Range)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+	}
+
+	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+	}
+
+	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListBucketAnalyticsConfigurations is the serialize-step
+// middleware for the S3 ListBucketAnalyticsConfigurations operation (REST-XML).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListBucketAnalyticsConfigurationsInput onto the outgoing
+// HTTP request (GET /{Bucket}?analytics plus an x-id disambiguator) and forwards
+// to the next handler.
+func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=ListBucketAnalyticsConfigurations")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput
+// encodes Bucket into the URI, ContinuationToken into the query string, and
+// ExpectedBucketOwner into a request header.
+func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContinuationToken != nil {
+		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListBucketIntelligentTieringConfigurations is the
+// serialize-step middleware for the S3 ListBucketIntelligentTieringConfigurations
+// operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListBucketIntelligentTieringConfigurationsInput onto the
+// outgoing HTTP request (GET /{Bucket}?intelligent-tiering plus an x-id
+// disambiguator) and forwards to the next handler.
+func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput
+// encodes Bucket into the URI and ContinuationToken into the query string.
+// (This operation has no ExpectedBucketOwner member, unlike its siblings.)
+func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContinuationToken != nil {
+		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListBucketInventoryConfigurations is the serialize-step
+// middleware for the S3 ListBucketInventoryConfigurations operation (REST-XML).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListBucketInventoryConfigurations struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListBucketInventoryConfigurationsInput onto the outgoing
+// HTTP request (GET /{Bucket}?inventory plus an x-id disambiguator) and forwards
+// to the next handler.
+func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=ListBucketInventoryConfigurations")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput
+// encodes Bucket into the URI, ContinuationToken into the query string, and
+// ExpectedBucketOwner into a request header.
+func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContinuationToken != nil {
+		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListBucketMetricsConfigurations is the serialize-step
+// middleware for the S3 ListBucketMetricsConfigurations operation (REST-XML).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListBucketMetricsConfigurations struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListBucketMetricsConfigurationsInput onto the outgoing
+// HTTP request (GET /{Bucket}?metrics plus an x-id disambiguator) and forwards
+// to the next handler.
+func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=ListBucketMetricsConfigurations")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput encodes
+// Bucket into the URI, ContinuationToken into the query string, and
+// ExpectedBucketOwner into a request header.
+func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContinuationToken != nil {
+		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListBuckets is the serialize-step middleware for the S3
+// ListBuckets operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListBuckets struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListBuckets) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListBucketsInput onto the outgoing HTTP request
+// (GET /). ListBuckets has no HTTP-bound members, so no bindings call is made.
+func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBucketsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListBucketsInput is emitted by codegen but
+// never called by HandleSerialize above, since ListBucketsInput has no
+// HTTP-bound members; it only validates against nil.
+func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListMultipartUploads is the serialize-step middleware
+// for the S3 ListMultipartUploads operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListMultipartUploads struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListMultipartUploads) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListMultipartUploadsInput onto the outgoing HTTP request
+// (GET /{Bucket}?uploads) and forwards to the next handler.
+func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListMultipartUploadsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?uploads")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput encodes Bucket
+// into the URI, ExpectedBucketOwner into a header, and the listing controls
+// (delimiter, encoding-type, key-marker, max-uploads, prefix, upload-id-marker)
+// into the query string.
+func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.Delimiter != nil {
+		encoder.SetQuery("delimiter").String(*v.Delimiter)
+	}
+
+	if len(v.EncodingType) > 0 {
+		encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.KeyMarker != nil {
+		encoder.SetQuery("key-marker").String(*v.KeyMarker)
+	}
+
+	// MaxUploads is a non-pointer int32 here; the zero value means "unset".
+	if v.MaxUploads != 0 {
+		encoder.SetQuery("max-uploads").Integer(v.MaxUploads)
+	}
+
+	if v.Prefix != nil {
+		encoder.SetQuery("prefix").String(*v.Prefix)
+	}
+
+	if v.UploadIdMarker != nil {
+		encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpListObjects is the serialize-step middleware for the S3
+// ListObjects (v1) operation (REST-XML protocol).
+// NOTE(review): generated by smithy-go codegen — do not hand-edit.
+type awsRestxml_serializeOpListObjects struct {
+}
+
+// ID identifies this middleware within the stack's serialize step.
+func (*awsRestxml_serializeOpListObjects) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize binds ListObjectsInput onto the outgoing HTTP request
+// (GET /{Bucket}) and forwards to the next handler.
+func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListObjectsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "GET"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsRestxml_serializeOpHttpBindingsListObjectsInput encodes Bucket into the
+// URI, ExpectedBucketOwner and RequestPayer into headers, and the listing
+// controls (delimiter, encoding-type, marker, max-keys, prefix) into the query.
+func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject nil or empty before encoding.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.Delimiter != nil {
+		encoder.SetQuery("delimiter").String(*v.Delimiter)
+	}
+
+	if len(v.EncodingType) > 0 {
+		encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Marker != nil {
+		encoder.SetQuery("marker").String(*v.Marker)
+	}
+
+	// MaxKeys is a non-pointer int32 here; the zero value means "unset".
+	if v.MaxKeys != 0 {
+		encoder.SetQuery("max-keys").Integer(v.MaxKeys)
+	}
+
+	if v.Prefix != nil {
+		encoder.SetQuery("prefix").String(*v.Prefix)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpListObjectsV2 struct {
+}
+
+func (*awsRestxml_serializeOpListObjectsV2) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListObjectsV2Input)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?list-type=2")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.FetchOwner {
+ encoder.SetQuery("fetch-owner").Boolean(v.FetchOwner)
+ }
+
+ if v.MaxKeys != 0 {
+ encoder.SetQuery("max-keys").Integer(v.MaxKeys)
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.StartAfter != nil {
+ encoder.SetQuery("start-after").String(*v.StartAfter)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListObjectVersions struct {
+}
+
+func (*awsRestxml_serializeOpListObjectVersions) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListObjectVersionsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versions")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.KeyMarker != nil {
+ encoder.SetQuery("key-marker").String(*v.KeyMarker)
+ }
+
+ if v.MaxKeys != 0 {
+ encoder.SetQuery("max-keys").Integer(v.MaxKeys)
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if v.VersionIdMarker != nil {
+ encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListParts struct {
+}
+
+func (*awsRestxml_serializeOpListParts) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListPartsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=ListParts")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MaxParts != 0 {
+ encoder.SetQuery("max-parts").Integer(v.MaxParts)
+ }
+
+ if v.PartNumberMarker != nil {
+ encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AccelerateConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccelerateConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAccelerateConfiguration(input.AccelerateConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketAcl struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AccessControlPolicy != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlPolicy",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWrite != nil && len(*v.GrantWrite) > 0 {
+ locationName := "X-Amz-Grant-Write"
+ encoder.SetHeader(locationName).String(*v.GrantWrite)
+ }
+
+ if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AnalyticsConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AnalyticsConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.CORSConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CORSConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.ServerSideEncryptionConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ServerSideEncryptionConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.IntelligentTieringConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IntelligentTieringConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+// Serialize middleware for the S3 PutBucketInventoryConfiguration operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketInventoryConfiguration struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketInventoryConfiguration: sets the "/{Bucket}?inventory" path/query,
+// method PUT, binds the HTTP-bound input members, and XML-encodes the
+// InventoryConfiguration payload under the S3 2006-03-01 namespace.
+func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.InventoryConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "InventoryConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput binds
+// Bucket (required URI label), ExpectedBucketOwner (header) and Id (query).
+func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketLifecycleConfiguration operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketLifecycleConfiguration: "/{Bucket}?lifecycle", method PUT, HTTP
+// bindings, and the LifecycleConfiguration payload encoded as a
+// BucketLifecycleConfiguration XML document.
+func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.LifecycleConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketLifecycleConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput binds
+// Bucket (required URI label) and ExpectedBucketOwner (header).
+func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketLogging operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketLogging struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketLogging) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for PutBucketLogging:
+// "/{Bucket}?logging", method PUT, HTTP bindings, and the
+// BucketLoggingStatus payload XML-encoded under the S3 namespace.
+func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketLoggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.BucketLoggingStatus != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketLoggingStatus",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput binds Bucket
+// (required URI label), ContentMD5 and ExpectedBucketOwner (headers).
+func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketMetricsConfiguration operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketMetricsConfiguration struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketMetricsConfiguration: "/{Bucket}?metrics", method PUT, HTTP
+// bindings, and the MetricsConfiguration payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.MetricsConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "MetricsConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput binds
+// Bucket (required URI label), ExpectedBucketOwner (header) and Id (query).
+func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketNotificationConfiguration
+// operation (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketNotificationConfiguration struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketNotificationConfiguration: "/{Bucket}?notification", method PUT,
+// HTTP bindings, and the NotificationConfiguration payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.NotificationConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NotificationConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput
+// binds Bucket (required URI label) and ExpectedBucketOwner (header).
+func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketOwnershipControls operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketOwnershipControls struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketOwnershipControls: "/{Bucket}?ownershipControls", method PUT,
+// HTTP bindings, and the OwnershipControls payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketOwnershipControlsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.OwnershipControls != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "OwnershipControls",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput binds
+// Bucket (required URI label), ContentMD5 and ExpectedBucketOwner (headers).
+func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketPolicy operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketPolicy struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketPolicy) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for PutBucketPolicy:
+// "/{Bucket}?policy", method PUT, HTTP bindings, and — unlike the XML-payload
+// operations in this file — streams the Policy string as a text/plain body.
+func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketPolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Policy != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("text/plain")
+		}
+
+		payload := strings.NewReader(*input.Policy)
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput binds Bucket
+// (required URI label) plus the ConfirmRemoveSelfBucketAccess, ContentMD5 and
+// ExpectedBucketOwner headers.
+func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ConfirmRemoveSelfBucketAccess {
+		locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access"
+		encoder.SetHeader(locationName).Boolean(v.ConfirmRemoveSelfBucketAccess)
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketReplication operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketReplication struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketReplication) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for PutBucketReplication:
+// "/{Bucket}?replication", method PUT, HTTP bindings, and the
+// ReplicationConfiguration payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketReplicationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.ReplicationConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplicationConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput binds Bucket
+// (required URI label) plus the ContentMD5, ExpectedBucketOwner and
+// object-lock Token headers.
+func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Token != nil && len(*v.Token) > 0 {
+		locationName := "X-Amz-Bucket-Object-Lock-Token"
+		encoder.SetHeader(locationName).String(*v.Token)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketRequestPayment operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketRequestPayment struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for
+// PutBucketRequestPayment: "/{Bucket}?requestPayment", method PUT, HTTP
+// bindings, and the RequestPaymentConfiguration payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketRequestPaymentInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.RequestPaymentConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RequestPaymentConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput binds Bucket
+// (required URI label), ContentMD5 and ExpectedBucketOwner (headers).
+func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketTagging operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketTagging struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketTagging) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for PutBucketTagging:
+// "/{Bucket}?tagging", method PUT, HTTP bindings, and the Tagging payload
+// XML-encoded under the S3 2006-03-01 namespace.
+func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketTaggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Tagging != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tagging",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput binds Bucket
+// (required URI label), ContentMD5 and ExpectedBucketOwner (headers).
+func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// Serialize middleware for the S3 PutBucketVersioning operation
+// (smithy-go generated code; do not edit by hand).
+type awsRestxml_serializeOpPutBucketVersioning struct {
+}
+
+// ID identifies this middleware within the serialize step of the stack.
+func (*awsRestxml_serializeOpPutBucketVersioning) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the REST-XML HTTP request for PutBucketVersioning:
+// "/{Bucket}?versioning", method PUT, HTTP bindings, and the
+// VersioningConfiguration payload XML-encoded.
+func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketVersioningInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning")
+	request.URL.Path = opPath
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.VersioningConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "VersioningConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput binds the
+// non-payload members of PutBucketVersioningInput: the required Bucket URI
+// label plus the optional Content-Md5, x-amz-expected-bucket-owner, and
+// x-amz-mfa headers. Returns a SerializationError if Bucket is nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	// MFA device serial + token, required by S3 when the bucket has MFA-delete enabled.
+	if v.MFA != nil && len(*v.MFA) > 0 {
+		locationName := "X-Amz-Mfa"
+		encoder.SetHeader(locationName).String(*v.MFA)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutBucketWebsite is the serialize-step middleware
+// that builds the HTTP request for the S3 PutBucketWebsite operation.
+type awsRestxml_serializeOpPutBucketWebsite struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutBucketWebsite) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutBucketWebsiteInput into a
+// "PUT /{Bucket}?website" request, writing WebsiteConfiguration as the XML
+// payload (root <WebsiteConfiguration> in the S3 doc namespace), then
+// delegates to the next handler in the stack.
+func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketWebsiteInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels and header members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.WebsiteConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <WebsiteConfiguration>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "WebsiteConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput binds the
+// non-payload members of PutBucketWebsiteInput: the required Bucket URI label
+// plus the optional Content-Md5 and x-amz-expected-bucket-owner headers.
+// Returns a SerializationError if Bucket is nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObject is the serialize-step middleware that
+// builds the HTTP request for the S3 PutObject operation.
+type awsRestxml_serializeOpPutObject struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObject) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectInput into a
+// "PUT /{Bucket}/{Key+}?x-id=PutObject" request. Unlike the XML-payload
+// operations, the payload here is the raw Body stream (default Content-Type
+// application/octet-stream), set directly on the request without buffering.
+func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=PutObject")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels, headers, and query members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Body != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+		}
+
+		// The object body is streamed as-is; no XML encoding is involved.
+		payload := input.Body
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutObjectInput binds the non-payload
+// members of PutObjectInput onto the request encoder: the required Bucket and
+// Key URI labels, the user Metadata map (prefixed headers), and the many
+// optional standard/x-amz-* headers. Returns a SerializationError if Bucket
+// or Key is nil or empty. Members are emitted in the generator's alphabetical
+// order.
+func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ACL) > 0 {
+		locationName := "X-Amz-Acl"
+		encoder.SetHeader(locationName).String(string(v.ACL))
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	// Boolean headers are only sent when true (false is the zero value).
+	if v.BucketKeyEnabled {
+		locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+		encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled)
+	}
+
+	if v.CacheControl != nil && len(*v.CacheControl) > 0 {
+		locationName := "Cache-Control"
+		encoder.SetHeader(locationName).String(*v.CacheControl)
+	}
+
+	if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 {
+		locationName := "Content-Disposition"
+		encoder.SetHeader(locationName).String(*v.ContentDisposition)
+	}
+
+	if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 {
+		locationName := "Content-Encoding"
+		encoder.SetHeader(locationName).String(*v.ContentEncoding)
+	}
+
+	if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 {
+		locationName := "Content-Language"
+		encoder.SetHeader(locationName).String(*v.ContentLanguage)
+	}
+
+	// NOTE(review): a zero ContentLength is treated as "unset", so an explicit
+	// zero-length header cannot be sent through this field — generator behavior
+	// for this SDK version; confirm upstream before depending on it.
+	if v.ContentLength != 0 {
+		locationName := "Content-Length"
+		encoder.SetHeader(locationName).Long(v.ContentLength)
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ContentType != nil && len(*v.ContentType) > 0 {
+		locationName := "Content-Type"
+		encoder.SetHeader(locationName).String(*v.ContentType)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	// Expires uses the HTTP-date format, unlike the date-time headers below.
+	if v.Expires != nil {
+		locationName := "Expires"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+	}
+
+	if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+		locationName := "X-Amz-Grant-Full-Control"
+		encoder.SetHeader(locationName).String(*v.GrantFullControl)
+	}
+
+	if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+		locationName := "X-Amz-Grant-Read"
+		encoder.SetHeader(locationName).String(*v.GrantRead)
+	}
+
+	if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+		locationName := "X-Amz-Grant-Read-Acp"
+		encoder.SetHeader(locationName).String(*v.GrantReadACP)
+	}
+
+	if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+		locationName := "X-Amz-Grant-Write-Acp"
+		encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+	}
+
+	// Key is a required greedy URI label ({Key+}); reject an empty value.
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	// User metadata becomes one "X-Amz-Meta-<key>" header per non-empty entry.
+	if v.Metadata != nil {
+		hv := encoder.Headers("X-Amz-Meta-")
+		for mapKey, mapVal := range v.Metadata {
+			if len(mapVal) > 0 {
+				hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+			}
+		}
+	}
+
+	if len(v.ObjectLockLegalHoldStatus) > 0 {
+		locationName := "X-Amz-Object-Lock-Legal-Hold"
+		encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+	}
+
+	if len(v.ObjectLockMode) > 0 {
+		locationName := "X-Amz-Object-Lock-Mode"
+		encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+	}
+
+	if v.ObjectLockRetainUntilDate != nil {
+		locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+		encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if len(v.ServerSideEncryption) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption"
+		encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+	}
+
+	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+	}
+
+	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+	}
+
+	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+	}
+
+	if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Context"
+		encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+	}
+
+	if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+		encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+	}
+
+	if len(v.StorageClass) > 0 {
+		locationName := "X-Amz-Storage-Class"
+		encoder.SetHeader(locationName).String(string(v.StorageClass))
+	}
+
+	// Tagging travels as a URL-encoded query string inside a header for PutObject.
+	if v.Tagging != nil && len(*v.Tagging) > 0 {
+		locationName := "X-Amz-Tagging"
+		encoder.SetHeader(locationName).String(*v.Tagging)
+	}
+
+	if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 {
+		locationName := "X-Amz-Website-Redirect-Location"
+		encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObjectAcl is the serialize-step middleware that
+// builds the HTTP request for the S3 PutObjectAcl operation.
+type awsRestxml_serializeOpPutObjectAcl struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObjectAcl) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectAclInput into a
+// "PUT /{Bucket}/{Key+}?acl" request, writing AccessControlPolicy as the XML
+// payload (root <AccessControlPolicy> in the S3 doc namespace), then
+// delegates to the next handler in the stack.
+func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectAclInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels, headers, and query members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.AccessControlPolicy != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <AccessControlPolicy>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessControlPolicy",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutObjectAclInput binds the non-payload
+// members of PutObjectAclInput: required Bucket and Key URI labels, the
+// optional canned-ACL / grant / MD5 / owner / payer headers, and the optional
+// versionId query parameter. Returns a SerializationError if Bucket or Key is
+// nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ACL) > 0 {
+		locationName := "X-Amz-Acl"
+		encoder.SetHeader(locationName).String(string(v.ACL))
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+		locationName := "X-Amz-Grant-Full-Control"
+		encoder.SetHeader(locationName).String(*v.GrantFullControl)
+	}
+
+	if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+		locationName := "X-Amz-Grant-Read"
+		encoder.SetHeader(locationName).String(*v.GrantRead)
+	}
+
+	if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+		locationName := "X-Amz-Grant-Read-Acp"
+		encoder.SetHeader(locationName).String(*v.GrantReadACP)
+	}
+
+	if v.GrantWrite != nil && len(*v.GrantWrite) > 0 {
+		locationName := "X-Amz-Grant-Write"
+		encoder.SetHeader(locationName).String(*v.GrantWrite)
+	}
+
+	if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+		locationName := "X-Amz-Grant-Write-Acp"
+		encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+	}
+
+	// Key is a required greedy URI label ({Key+}); reject an empty value.
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	// versionId is a query member; note it is sent whenever non-nil, even if empty.
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObjectLegalHold is the serialize-step middleware
+// that builds the HTTP request for the S3 PutObjectLegalHold operation.
+type awsRestxml_serializeOpPutObjectLegalHold struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectLegalHoldInput into a
+// "PUT /{Bucket}/{Key+}?legal-hold" request, writing LegalHold as the XML
+// payload (root <ObjectLockLegalHold> in the S3 doc namespace), then
+// delegates to the next handler in the stack.
+func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectLegalHoldInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels, headers, and query members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.LegalHold != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <ObjectLockLegalHold>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ObjectLockLegalHold",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput binds the
+// non-payload members of PutObjectLegalHoldInput: required Bucket and Key URI
+// labels, optional MD5 / owner / payer headers, and the optional versionId
+// query parameter. Returns a SerializationError if Bucket or Key is nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	// Key is a required greedy URI label ({Key+}); reject an empty value.
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	// versionId is sent whenever non-nil, even if the pointed-to string is empty.
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObjectLockConfiguration is the serialize-step
+// middleware that builds the HTTP request for the S3
+// PutObjectLockConfiguration operation.
+type awsRestxml_serializeOpPutObjectLockConfiguration struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectLockConfigurationInput into a
+// "PUT /{Bucket}?object-lock" request, writing ObjectLockConfiguration as the
+// XML payload (root <ObjectLockConfiguration> in the S3 doc namespace), then
+// delegates to the next handler in the stack.
+func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectLockConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels and header members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.ObjectLockConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <ObjectLockConfiguration>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ObjectLockConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+// awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput binds the
+// non-payload members of PutObjectLockConfigurationInput: the required Bucket
+// URI label plus the optional MD5 / owner / payer / object-lock-token headers.
+// Returns a SerializationError if Bucket is nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.Token != nil && len(*v.Token) > 0 {
+		locationName := "X-Amz-Bucket-Object-Lock-Token"
+		encoder.SetHeader(locationName).String(*v.Token)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObjectRetention is the serialize-step middleware
+// that builds the HTTP request for the S3 PutObjectRetention operation.
+type awsRestxml_serializeOpPutObjectRetention struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObjectRetention) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectRetentionInput into a
+// "PUT /{Bucket}/{Key+}?retention" request, writing Retention as the XML
+// payload (root <ObjectLockRetention> in the S3 doc namespace), then
+// delegates to the next handler in the stack.
+func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectRetentionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels, headers, and query members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Retention != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <ObjectLockRetention>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ObjectLockRetention",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+}
+// awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput binds the
+// non-payload members of PutObjectRetentionInput: required Bucket and Key URI
+// labels, the bypass-governance / MD5 / owner / payer headers, and the
+// optional versionId query parameter. Returns a SerializationError if Bucket
+// or Key is nil or empty.
+func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	// Bucket is a required URI label; reject an empty value up front.
+	if v.Bucket == nil || len(*v.Bucket) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+	}
+	if v.Bucket != nil {
+		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+			return err
+		}
+	}
+
+	// Boolean header is only emitted when true (false is the zero value).
+	if v.BypassGovernanceRetention {
+		locationName := "X-Amz-Bypass-Governance-Retention"
+		encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention)
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	// Key is a required greedy URI label ({Key+}); reject an empty value.
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	// versionId is sent whenever non-nil, even if the pointed-to string is empty.
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+// awsRestxml_serializeOpPutObjectTagging is the serialize-step middleware
+// that builds the HTTP request for the S3 PutObjectTagging operation.
+type awsRestxml_serializeOpPutObjectTagging struct {
+}
+
+// ID returns the identifier under which this middleware is registered.
+func (*awsRestxml_serializeOpPutObjectTagging) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize encodes *PutObjectTaggingInput into a
+// "PUT /{Bucket}/{Key+}?tagging" request, writing Tagging as the XML payload
+// (root <Tagging> in the S3 doc namespace), then delegates to the next
+// handler in the stack.
+func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutObjectTaggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// Split the modeled request URI into path and query components.
+	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging")
+	request.URL.Path = opPath
+	// NOTE(review): a pre-existing RawQuery is overwritten, not merged —
+	// generator behavior; confirm upstream if query merging is ever required.
+	if len(request.URL.RawQuery) > 0 {
+		request.URL.RawQuery = "&" + opQuery
+	} else {
+		request.URL.RawQuery = opQuery
+	}
+
+	request.Method = "PUT"
+	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	// Bind URI labels, headers, and query members of the input.
+	if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Tagging != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		// Serialize the payload member as XML rooted at <Tagging>.
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tagging",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	// Flush the accumulated path/query/header bindings into the underlying request.
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutPublicAccessBlock struct {
+}
+
+func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutPublicAccessBlockInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.PublicAccessBlockConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PublicAccessBlockConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpRestoreObject struct {
+}
+
+func (*awsRestxml_serializeOpRestoreObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*RestoreObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?restore")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "POST"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.RestoreRequest != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RestoreRequest",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpUploadPart struct {
+}
+
+func (*awsRestxml_serializeOpUploadPart) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UploadPartInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPart")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Body != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+ }
+
+ payload := input.Body
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.ContentLength != 0 {
+ locationName := "Content-Length"
+ encoder.SetHeader(locationName).Long(v.ContentLength)
+ }
+
+ if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ {
+ encoder.SetQuery("partNumber").Integer(v.PartNumber)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpUploadPartCopy struct {
+}
+
+func (*awsRestxml_serializeOpUploadPartCopy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UploadPartCopyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPartCopy")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "PUT"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Bucket == nil || len(*v.Bucket) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
+ }
+ if v.Bucket != nil {
+ if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
+ return err
+ }
+ }
+
+ if v.CopySource != nil && len(*v.CopySource) > 0 {
+ locationName := "X-Amz-Copy-Source"
+ encoder.SetHeader(locationName).String(*v.CopySource)
+ }
+
+ if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 {
+ locationName := "X-Amz-Copy-Source-If-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfMatch)
+ }
+
+ if v.CopySourceIfModifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince))
+ }
+
+ if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 {
+ locationName := "X-Amz-Copy-Source-If-None-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch)
+ }
+
+ if v.CopySourceIfUnmodifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince))
+ }
+
+ if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 {
+ locationName := "X-Amz-Copy-Source-Range"
+ encoder.SetHeader(locationName).String(*v.CopySourceRange)
+ }
+
+ if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm)
+ }
+
+ if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey)
+ }
+
+ if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 {
+ locationName := "X-Amz-Source-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ {
+ encoder.SetQuery("partNumber").Integer(v.PartNumber)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
+func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DaysAfterInitiation != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DaysAfterInitiation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.DaysAfterInitiation)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grants != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlList",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil {
+ return err
+ }
+ }
+ if v.Owner != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Owner",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Owner) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Owner",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Owner))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.StorageClassAnalysis != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClassAnalysis",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3BucketDestination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3BucketDestination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.AnalyticsFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AnalyticsFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.AnalyticsFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.BucketAccountId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketAccountId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.BucketAccountId)
+ }
+ if len(v.Format) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Format",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Format))
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error {
+ defer value.Close()
+ if v.LoggingEnabled != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LoggingEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Parts != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Part",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ETag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ETag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ETag)
+ }
+ if v.PartNumber != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PartNumber",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.PartNumber)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HttpErrorCodeReturnedEquals != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HttpErrorCodeReturnedEquals",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HttpErrorCodeReturnedEquals)
+ }
+ if v.KeyPrefixEquals != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KeyPrefixEquals",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KeyPrefixEquals)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.CORSRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CORSRule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AllowedHeaders != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedHeader",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil {
+ return err
+ }
+ }
+ if v.AllowedMethods != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedMethod",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil {
+ return err
+ }
+ }
+ if v.AllowedOrigins != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedOrigin",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil {
+ return err
+ }
+ }
+ if v.ExposeHeaders != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExposeHeader",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil {
+ return err
+ }
+ }
+ if v.MaxAgeSeconds != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MaxAgeSeconds",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.MaxAgeSeconds)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.LocationConstraint) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LocationConstraint",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.LocationConstraint))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AllowQuotedRecordDelimiter {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowQuotedRecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.AllowQuotedRecordDelimiter)
+ }
+ if v.Comments != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Comments",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Comments)
+ }
+ if v.FieldDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FieldDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.FieldDelimiter)
+ }
+ if len(v.FileHeaderInfo) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FileHeaderInfo",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.FileHeaderInfo))
+ }
+ if v.QuoteCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteCharacter)
+ }
+ if v.QuoteEscapeCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteEscapeCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteEscapeCharacter)
+ }
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.FieldDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FieldDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.FieldDelimiter)
+ }
+ if v.QuoteCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteCharacter)
+ }
+ if v.QuoteEscapeCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteEscapeCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteEscapeCharacter)
+ }
+ if len(v.QuoteFields) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteFields",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.QuoteFields))
+ }
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Days != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Days)
+ }
+ if len(v.Mode) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Mode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Mode))
+ }
+ if v.Years != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Years",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Years)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Objects != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Object",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil {
+ return err
+ }
+ }
+ if v.Quiet {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Quiet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.Quiet)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccessControlTranslation != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlTranslation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil {
+ return err
+ }
+ }
+ if v.Account != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Account",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Account)
+ }
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.EncryptionConfiguration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EncryptionConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil {
+ return err
+ }
+ }
+ if v.Metrics != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Metrics",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil {
+ return err
+ }
+ }
+ if v.ReplicationTime != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicationTime",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil {
+ return err
+ }
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.EncryptionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EncryptionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.EncryptionType))
+ }
+ if v.KMSContext != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSContext",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSContext)
+ }
+ if v.KMSKeyId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSKeyId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSKeyId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ReplicaKmsKeyID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicaKmsKeyID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplicaKmsKeyID)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(string(v[i]))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Name) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Name",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Name))
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Tier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Tier))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grantee != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grantee",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Permission) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Permission",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Permission))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrantee(v *types.Grantee, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DisplayName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DisplayName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.DisplayName)
+ }
+ if v.EmailAddress != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EmailAddress",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.EmailAddress)
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.URI != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "URI",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.URI)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grant",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Suffix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Suffix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Suffix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.CompressionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CompressionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.CompressionType))
+ }
+ if v.CSV != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CSV",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil {
+ return err
+ }
+ }
+ if v.JSON != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "JSON",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil {
+ return err
+ }
+ }
+ if v.Parquet != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Parquet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Tierings != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tiering",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.And != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if len(v.IncludedObjectVersions) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IncludedObjectVersions",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.IncludedObjectVersions))
+ }
+ {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IsEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.IsEnabled)
+ }
+ if v.OptionalFields != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OptionalFields",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil {
+ return err
+ }
+ }
+ if v.Schedule != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Schedule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3BucketDestination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3BucketDestination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error {
+ defer value.Close()
+ if v.SSEKMS != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSE-KMS",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil {
+ return err
+ }
+ }
+ if v.SSES3 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSE-S3",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Field",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ am.String(string(v[i]))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccountId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccountId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.AccountId)
+ }
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.Encryption != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Encryption",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Format) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Format",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Format))
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Frequency) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Frequency",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Frequency))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.LambdaFunctionArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CloudFunction",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.LambdaFunctionArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Date != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Date",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.Date))
+ }
+ if v.Days != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Days)
+ }
+ if v.ExpiredObjectDeleteMarker {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExpiredObjectDeleteMarker",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.ExpiredObjectDeleteMarker)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AbortIncompleteMultipartUpload != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AbortIncompleteMultipartUpload",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil {
+ return err
+ }
+ }
+ if v.Expiration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Expiration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.NoncurrentVersionExpiration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentVersionExpiration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil {
+ return err
+ }
+ }
+ if v.NoncurrentVersionTransitions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentVersionTransition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Transitions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Transition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.LifecycleRuleFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.LifecycleRuleFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.LifecycleRuleFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error {
+ defer value.Close()
+ if v.TargetBucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetBucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TargetBucket)
+ }
+ if v.TargetGrants != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetGrants",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil {
+ return err
+ }
+ }
+ if v.TargetPrefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetPrefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TargetPrefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Name != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Name",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Name)
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error {
+ defer value.Close()
+ if v.EventThreshold != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EventThreshold",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.MetricsFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.MetricsFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.MetricsFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.NoncurrentDays != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentDays",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.NoncurrentDays)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.NoncurrentDays != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentDays",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.NoncurrentDays)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.LambdaFunctionConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CloudFunctionConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil {
+ return err
+ }
+ }
+ if v.QueueConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QueueConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil {
+ return err
+ }
+ }
+ if v.TopicConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TopicConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ if v.VersionId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "VersionId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.VersionId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.ObjectLockEnabled) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectLockEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ObjectLockEnabled))
+ }
+ if v.Rule != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Mode) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Mode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Mode))
+ }
+ if v.RetainUntilDate != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RetainUntilDate",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.RetainUntilDate))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DefaultRetention != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DefaultRetention",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error {
+ defer value.Close()
+ if v.CSV != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CSV",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil {
+ return err
+ }
+ }
+ if v.JSON != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "JSON",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DisplayName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DisplayName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.DisplayName)
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.ObjectOwnership) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectOwnership",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ObjectOwnership))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.BlockPublicAcls {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BlockPublicAcls",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.BlockPublicAcls)
+ }
+ if v.BlockPublicPolicy {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BlockPublicPolicy",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.BlockPublicPolicy)
+ }
+ if v.IgnorePublicAcls {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IgnorePublicAcls",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.IgnorePublicAcls)
+ }
+ if v.RestrictPublicBuckets {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RestrictPublicBuckets",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.RestrictPublicBuckets)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.QueueArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Queue",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QueueArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HostName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HostName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HostName)
+ }
+ if v.HttpRedirectCode != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HttpRedirectCode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HttpRedirectCode)
+ }
+ if len(v.Protocol) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Protocol",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Protocol))
+ }
+ if v.ReplaceKeyPrefixWith != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplaceKeyPrefixWith",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplaceKeyPrefixWith)
+ }
+ if v.ReplaceKeyWith != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplaceKeyWith",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplaceKeyWith)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HostName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HostName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HostName)
+ }
+ if len(v.Protocol) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Protocol",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Protocol))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Role != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Role",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Role)
+ }
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DeleteMarkerReplication != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DeleteMarkerReplication",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil {
+ return err
+ }
+ }
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if v.ExistingObjectReplication != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExistingObjectReplication",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Priority != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Priority",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Priority)
+ }
+ if v.SourceSelectionCriteria != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SourceSelectionCriteria",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.ReplicationRuleFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.ReplicationRuleFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.ReplicationRuleFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Time != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Time",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Minutes != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Minutes",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Minutes)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Payer) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Payer",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Payer))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Days != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Days)
+ }
+ if v.Description != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Description",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Description)
+ }
+ if v.GlacierJobParameters != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "GlacierJobParameters",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil {
+ return err
+ }
+ }
+ if v.OutputLocation != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputLocation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil {
+ return err
+ }
+ }
+ if v.SelectParameters != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SelectParameters",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Tier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Tier))
+ }
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Condition != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Condition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil {
+ return err
+ }
+ }
+ if v.Redirect != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Redirect",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RoutingRule",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.FilterRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FilterRule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccessControlList != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlList",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil {
+ return err
+ }
+ }
+ if v.BucketName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.BucketName)
+ }
+ if len(v.CannedACL) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CannedACL",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.CannedACL))
+ }
+ if v.Encryption != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Encryption",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ if v.Tagging != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tagging",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil {
+ return err
+ }
+ }
+ if v.UserMetadata != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "UserMetadata",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Expression != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Expression",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Expression)
+ }
+ if len(v.ExpressionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExpressionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ExpressionType))
+ }
+ if v.InputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "InputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil {
+ return err
+ }
+ }
+ if v.OutputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error {
+ defer value.Close()
+ if v.KMSMasterKeyID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSMasterKeyID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSMasterKeyID)
+ }
+ if len(v.SSEAlgorithm) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSEAlgorithm",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.SSEAlgorithm))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ApplyServerSideEncryptionByDefault != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ApplyServerSideEncryptionByDefault",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil {
+ return err
+ }
+ }
+ if v.BucketKeyEnabled {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketKeyEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(v.BucketKeyEnabled)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ReplicaModifications != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicaModifications",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil {
+ return err
+ }
+ }
+ if v.SseKmsEncryptedObjects != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SseKmsEncryptedObjects",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error {
+ defer value.Close()
+ if v.KeyId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KeyId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KeyId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DataExport != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DataExport",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if len(v.OutputSchemaVersion) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputSchemaVersion",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.OutputSchemaVersion))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error {
+ defer value.Close()
+ if v.TagSet != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TagSet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grantee != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grantee",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Permission) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Permission",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Permission))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grant",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.AccessTier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessTier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.AccessTier))
+ }
+ {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Days)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.TopicArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Topic",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TopicArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Date != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Date",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.Date))
+ }
+ if v.Days != 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(v.Days)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MetadataEntry",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.MFADelete) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MfaDelete",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.MFADelete))
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ErrorDocument != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ErrorDocument",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil {
+ return err
+ }
+ }
+ if v.IndexDocument != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IndexDocument",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil {
+ return err
+ }
+ }
+ if v.RedirectAllRequestsTo != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RedirectAllRequestsTo",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil {
+ return err
+ }
+ }
+ if v.RoutingRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RoutingRules",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
new file mode 100644
index 000000000..41fe45df9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
@@ -0,0 +1,1116 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+type AnalyticsS3ExportFileFormat string
+
+// Enum values for AnalyticsS3ExportFileFormat
+const (
+ AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV"
+)
+
+// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat {
+ return []AnalyticsS3ExportFileFormat{
+ "CSV",
+ }
+}
+
+type ArchiveStatus string
+
+// Enum values for ArchiveStatus
+const (
+ ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS"
+ ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS"
+)
+
+// Values returns all known values for ArchiveStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ArchiveStatus) Values() []ArchiveStatus {
+ return []ArchiveStatus{
+ "ARCHIVE_ACCESS",
+ "DEEP_ARCHIVE_ACCESS",
+ }
+}
+
+type BucketAccelerateStatus string
+
+// Enum values for BucketAccelerateStatus
+const (
+ BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled"
+ BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended"
+)
+
+// Values returns all known values for BucketAccelerateStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketAccelerateStatus) Values() []BucketAccelerateStatus {
+ return []BucketAccelerateStatus{
+ "Enabled",
+ "Suspended",
+ }
+}
+
+type BucketCannedACL string
+
+// Enum values for BucketCannedACL
+const (
+ BucketCannedACLPrivate BucketCannedACL = "private"
+ BucketCannedACLPublicRead BucketCannedACL = "public-read"
+ BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write"
+ BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read"
+)
+
+// Values returns all known values for BucketCannedACL. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketCannedACL) Values() []BucketCannedACL {
+ return []BucketCannedACL{
+ "private",
+ "public-read",
+ "public-read-write",
+ "authenticated-read",
+ }
+}
+
+type BucketLocationConstraint string
+
+// Enum values for BucketLocationConstraint
+const (
+ BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1"
+ BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1"
+ BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1"
+ BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2"
+ BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3"
+ BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1"
+ BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1"
+ BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2"
+ BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1"
+ BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1"
+ BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1"
+ BucketLocationConstraintEu BucketLocationConstraint = "EU"
+ BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1"
+ BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1"
+ BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1"
+ BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1"
+ BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2"
+ BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3"
+ BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1"
+ BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1"
+ BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2"
+ BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1"
+ BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1"
+ BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1"
+ BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2"
+)
+
+// Values returns all known values for BucketLocationConstraint. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLocationConstraint) Values() []BucketLocationConstraint {
+ return []BucketLocationConstraint{
+ "af-south-1",
+ "ap-east-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-south-1",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ca-central-1",
+ "cn-north-1",
+ "cn-northwest-1",
+ "EU",
+ "eu-central-1",
+ "eu-north-1",
+ "eu-south-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "me-south-1",
+ "sa-east-1",
+ "us-east-2",
+ "us-gov-east-1",
+ "us-gov-west-1",
+ "us-west-1",
+ "us-west-2",
+ }
+}
+
+type BucketLogsPermission string
+
+// Enum values for BucketLogsPermission
+const (
+ BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL"
+ BucketLogsPermissionRead BucketLogsPermission = "READ"
+ BucketLogsPermissionWrite BucketLogsPermission = "WRITE"
+)
+
+// Values returns all known values for BucketLogsPermission. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLogsPermission) Values() []BucketLogsPermission {
+ return []BucketLogsPermission{
+ "FULL_CONTROL",
+ "READ",
+ "WRITE",
+ }
+}
+
+type BucketVersioningStatus string
+
+// Enum values for BucketVersioningStatus
+const (
+ BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled"
+ BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended"
+)
+
+// Values returns all known values for BucketVersioningStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketVersioningStatus) Values() []BucketVersioningStatus {
+ return []BucketVersioningStatus{
+ "Enabled",
+ "Suspended",
+ }
+}
+
+type CompressionType string
+
+// Enum values for CompressionType
+const (
+ CompressionTypeNone CompressionType = "NONE"
+ CompressionTypeGzip CompressionType = "GZIP"
+ CompressionTypeBzip2 CompressionType = "BZIP2"
+)
+
+// Values returns all known values for CompressionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (CompressionType) Values() []CompressionType {
+ return []CompressionType{
+ "NONE",
+ "GZIP",
+ "BZIP2",
+ }
+}
+
+type DeleteMarkerReplicationStatus string
+
+// Enum values for DeleteMarkerReplicationStatus
+const (
+ DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled"
+ DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled"
+)
+
+// Values returns all known values for DeleteMarkerReplicationStatus. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client. The ordering of this slice is not guaranteed to be stable across
+// updates.
+func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus {
+ return []DeleteMarkerReplicationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type EncodingType string
+
+// Enum values for EncodingType
+const (
+ EncodingTypeUrl EncodingType = "url"
+)
+
+// Values returns all known values for EncodingType. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (EncodingType) Values() []EncodingType {
+ return []EncodingType{
+ "url",
+ }
+}
+
+type Event string
+
+// Values returns all known values for Event. Note that this can be expanded in the
+// future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (Event) Values() []Event {
+ return []Event{
+ "s3:ReducedRedundancyLostObject",
+ "s3:ObjectCreated:*",
+ "s3:ObjectCreated:Put",
+ "s3:ObjectCreated:Post",
+ "s3:ObjectCreated:Copy",
+ "s3:ObjectCreated:CompleteMultipartUpload",
+ "s3:ObjectRemoved:*",
+ "s3:ObjectRemoved:Delete",
+ "s3:ObjectRemoved:DeleteMarkerCreated",
+ "s3:ObjectRestore:*",
+ "s3:ObjectRestore:Post",
+ "s3:ObjectRestore:Completed",
+ "s3:Replication:*",
+ "s3:Replication:OperationFailedReplication",
+ "s3:Replication:OperationNotTracked",
+ "s3:Replication:OperationMissedThreshold",
+ "s3:Replication:OperationReplicatedAfterThreshold",
+ }
+}
+
+type ExistingObjectReplicationStatus string
+
+// Enum values for ExistingObjectReplicationStatus
+const (
+ ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled"
+ ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled"
+)
+
+// Values returns all known values for ExistingObjectReplicationStatus. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client. The ordering of this slice is not guaranteed to be stable across
+// updates.
+func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus {
+ return []ExistingObjectReplicationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ExpirationStatus string
+
+// Enum values for ExpirationStatus
+const (
+ ExpirationStatusEnabled ExpirationStatus = "Enabled"
+ ExpirationStatusDisabled ExpirationStatus = "Disabled"
+)
+
+// Values returns all known values for ExpirationStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ExpirationStatus) Values() []ExpirationStatus {
+ return []ExpirationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ExpressionType string
+
+// Enum values for ExpressionType
+const (
+ ExpressionTypeSql ExpressionType = "SQL"
+)
+
+// Values returns all known values for ExpressionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ExpressionType) Values() []ExpressionType {
+ return []ExpressionType{
+ "SQL",
+ }
+}
+
+type FileHeaderInfo string
+
+// Enum values for FileHeaderInfo
+const (
+ FileHeaderInfoUse FileHeaderInfo = "USE"
+ FileHeaderInfoIgnore FileHeaderInfo = "IGNORE"
+ FileHeaderInfoNone FileHeaderInfo = "NONE"
+)
+
+// Values returns all known values for FileHeaderInfo. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (FileHeaderInfo) Values() []FileHeaderInfo {
+ return []FileHeaderInfo{
+ "USE",
+ "IGNORE",
+ "NONE",
+ }
+}
+
+type FilterRuleName string
+
+// Enum values for FilterRuleName
+const (
+ FilterRuleNamePrefix FilterRuleName = "prefix"
+ FilterRuleNameSuffix FilterRuleName = "suffix"
+)
+
+// Values returns all known values for FilterRuleName. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (FilterRuleName) Values() []FilterRuleName {
+ return []FilterRuleName{
+ "prefix",
+ "suffix",
+ }
+}
+
+type IntelligentTieringAccessTier string
+
+// Enum values for IntelligentTieringAccessTier
+const (
+ IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS"
+ IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS"
+)
+
+// Values returns all known values for IntelligentTieringAccessTier. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier {
+ return []IntelligentTieringAccessTier{
+ "ARCHIVE_ACCESS",
+ "DEEP_ARCHIVE_ACCESS",
+ }
+}
+
+type IntelligentTieringStatus string
+
+// Enum values for IntelligentTieringStatus
+const (
+ IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled"
+ IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled"
+)
+
+// Values returns all known values for IntelligentTieringStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (IntelligentTieringStatus) Values() []IntelligentTieringStatus {
+ return []IntelligentTieringStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type InventoryFormat string
+
+// Enum values for InventoryFormat
+const (
+ InventoryFormatCsv InventoryFormat = "CSV"
+ InventoryFormatOrc InventoryFormat = "ORC"
+ InventoryFormatParquet InventoryFormat = "Parquet"
+)
+
+// Values returns all known values for InventoryFormat. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryFormat) Values() []InventoryFormat {
+ return []InventoryFormat{
+ "CSV",
+ "ORC",
+ "Parquet",
+ }
+}
+
+type InventoryFrequency string
+
+// Enum values for InventoryFrequency
+const (
+ InventoryFrequencyDaily InventoryFrequency = "Daily"
+ InventoryFrequencyWeekly InventoryFrequency = "Weekly"
+)
+
+// Values returns all known values for InventoryFrequency. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryFrequency) Values() []InventoryFrequency {
+ return []InventoryFrequency{
+ "Daily",
+ "Weekly",
+ }
+}
+
+type InventoryIncludedObjectVersions string
+
+// Enum values for InventoryIncludedObjectVersions
+const (
+ InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All"
+ InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current"
+)
+
+// Values returns all known values for InventoryIncludedObjectVersions. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client. The ordering of this slice is not guaranteed to be stable across
+// updates.
+func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions {
+ return []InventoryIncludedObjectVersions{
+ "All",
+ "Current",
+ }
+}
+
+type InventoryOptionalField string
+
+// Enum values for InventoryOptionalField
+const (
+ InventoryOptionalFieldSize InventoryOptionalField = "Size"
+ InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate"
+ InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass"
+ InventoryOptionalFieldETag InventoryOptionalField = "ETag"
+ InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded"
+ InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus"
+ InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus"
+ InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate"
+ InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode"
+ InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus"
+ InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier"
+)
+
+// Values returns all known values for InventoryOptionalField. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryOptionalField) Values() []InventoryOptionalField {
+ return []InventoryOptionalField{
+ "Size",
+ "LastModifiedDate",
+ "StorageClass",
+ "ETag",
+ "IsMultipartUploaded",
+ "ReplicationStatus",
+ "EncryptionStatus",
+ "ObjectLockRetainUntilDate",
+ "ObjectLockMode",
+ "ObjectLockLegalHoldStatus",
+ "IntelligentTieringAccessTier",
+ }
+}
+
+type JSONType string
+
+// Enum values for JSONType
+const (
+ JSONTypeDocument JSONType = "DOCUMENT"
+ JSONTypeLines JSONType = "LINES"
+)
+
+// Values returns all known values for JSONType. Note that this can be expanded in
+// the future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (JSONType) Values() []JSONType {
+ return []JSONType{
+ "DOCUMENT",
+ "LINES",
+ }
+}
+
+type MetadataDirective string
+
+// Enum values for MetadataDirective
+const (
+ MetadataDirectiveCopy MetadataDirective = "COPY"
+ MetadataDirectiveReplace MetadataDirective = "REPLACE"
+)
+
+// Values returns all known values for MetadataDirective. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (MetadataDirective) Values() []MetadataDirective {
+ return []MetadataDirective{
+ "COPY",
+ "REPLACE",
+ }
+}
+
+type MetricsStatus string
+
+// Enum values for MetricsStatus
+const (
+ MetricsStatusEnabled MetricsStatus = "Enabled"
+ MetricsStatusDisabled MetricsStatus = "Disabled"
+)
+
+// Values returns all known values for MetricsStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (MetricsStatus) Values() []MetricsStatus {
+ return []MetricsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type MFADelete string
+
+// Enum values for MFADelete
+const (
+ MFADeleteEnabled MFADelete = "Enabled"
+ MFADeleteDisabled MFADelete = "Disabled"
+)
+
+// Values returns all known values for MFADelete. Note that this can be expanded in
+// the future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (MFADelete) Values() []MFADelete {
+ return []MFADelete{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type MFADeleteStatus string
+
+// Enum values for MFADeleteStatus
+const (
+ MFADeleteStatusEnabled MFADeleteStatus = "Enabled"
+ MFADeleteStatusDisabled MFADeleteStatus = "Disabled"
+)
+
+// Values returns all known values for MFADeleteStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (MFADeleteStatus) Values() []MFADeleteStatus {
+ return []MFADeleteStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ObjectCannedACL string
+
+// Enum values for ObjectCannedACL
+const (
+ ObjectCannedACLPrivate ObjectCannedACL = "private"
+ ObjectCannedACLPublicRead ObjectCannedACL = "public-read"
+ ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write"
+ ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read"
+ ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read"
+ ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read"
+ ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control"
+)
+
+// Values returns all known values for ObjectCannedACL. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectCannedACL) Values() []ObjectCannedACL {
+ return []ObjectCannedACL{
+ "private",
+ "public-read",
+ "public-read-write",
+ "authenticated-read",
+ "aws-exec-read",
+ "bucket-owner-read",
+ "bucket-owner-full-control",
+ }
+}
+
+type ObjectLockEnabled string
+
+// Enum values for ObjectLockEnabled
+const (
+ ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled"
+)
+
+// Values returns all known values for ObjectLockEnabled. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockEnabled) Values() []ObjectLockEnabled {
+ return []ObjectLockEnabled{
+ "Enabled",
+ }
+}
+
+type ObjectLockLegalHoldStatus string
+
+// Enum values for ObjectLockLegalHoldStatus
+const (
+ ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON"
+ ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF"
+)
+
+// Values returns all known values for ObjectLockLegalHoldStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus {
+ return []ObjectLockLegalHoldStatus{
+ "ON",
+ "OFF",
+ }
+}
+
+type ObjectLockMode string
+
+// Enum values for ObjectLockMode
+const (
+ ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE"
+ ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE"
+)
+
+// Values returns all known values for ObjectLockMode. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockMode) Values() []ObjectLockMode {
+ return []ObjectLockMode{
+ "GOVERNANCE",
+ "COMPLIANCE",
+ }
+}
+
+type ObjectLockRetentionMode string
+
+// Enum values for ObjectLockRetentionMode
+const (
+ ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE"
+ ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE"
+)
+
+// Values returns all known values for ObjectLockRetentionMode. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode {
+ return []ObjectLockRetentionMode{
+ "GOVERNANCE",
+ "COMPLIANCE",
+ }
+}
+
+type ObjectOwnership string
+
+// Enum values for ObjectOwnership
+const (
+ ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred"
+ ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter"
+)
+
+// Values returns all known values for ObjectOwnership. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectOwnership) Values() []ObjectOwnership {
+ return []ObjectOwnership{
+ "BucketOwnerPreferred",
+ "ObjectWriter",
+ }
+}
+
+type ObjectStorageClass string
+
+// Enum values for ObjectStorageClass
+const (
+ ObjectStorageClassStandard ObjectStorageClass = "STANDARD"
+ ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY"
+ ObjectStorageClassGlacier ObjectStorageClass = "GLACIER"
+ ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA"
+ ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA"
+ ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING"
+ ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE"
+ ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS"
+)
+
+// Values returns all known values for ObjectStorageClass. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectStorageClass) Values() []ObjectStorageClass {
+ return []ObjectStorageClass{
+ "STANDARD",
+ "REDUCED_REDUNDANCY",
+ "GLACIER",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "DEEP_ARCHIVE",
+ "OUTPOSTS",
+ }
+}
+
+type ObjectVersionStorageClass string
+
+// Enum values for ObjectVersionStorageClass
+const (
+ ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD"
+)
+
+// Values returns all known values for ObjectVersionStorageClass. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass {
+ return []ObjectVersionStorageClass{
+ "STANDARD",
+ }
+}
+
+type OwnerOverride string
+
+// Enum values for OwnerOverride
+const (
+ OwnerOverrideDestination OwnerOverride = "Destination"
+)
+
+// Values returns all known values for OwnerOverride. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (OwnerOverride) Values() []OwnerOverride {
+ return []OwnerOverride{
+ "Destination",
+ }
+}
+
+type Payer string
+
+// Enum values for Payer
+const (
+ PayerRequester Payer = "Requester"
+ PayerBucketOwner Payer = "BucketOwner"
+)
+
+// Values returns all known values for Payer. Note that this can be expanded in the
+// future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (Payer) Values() []Payer {
+ return []Payer{
+ "Requester",
+ "BucketOwner",
+ }
+}
+
+type Permission string
+
+// Enum values for Permission
+const (
+ PermissionFullControl Permission = "FULL_CONTROL"
+ PermissionWrite Permission = "WRITE"
+ PermissionWriteAcp Permission = "WRITE_ACP"
+ PermissionRead Permission = "READ"
+ PermissionReadAcp Permission = "READ_ACP"
+)
+
+// Values returns all known values for Permission. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (Permission) Values() []Permission {
+ return []Permission{
+ "FULL_CONTROL",
+ "WRITE",
+ "WRITE_ACP",
+ "READ",
+ "READ_ACP",
+ }
+}
+
+type Protocol string
+
+// Enum values for Protocol
+const (
+ ProtocolHttp Protocol = "http"
+ ProtocolHttps Protocol = "https"
+)
+
+// Values returns all known values for Protocol. Note that this can be expanded in
+// the future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (Protocol) Values() []Protocol {
+ return []Protocol{
+ "http",
+ "https",
+ }
+}
+
+type QuoteFields string
+
+// Enum values for QuoteFields
+const (
+ QuoteFieldsAlways QuoteFields = "ALWAYS"
+ QuoteFieldsAsneeded QuoteFields = "ASNEEDED"
+)
+
+// Values returns all known values for QuoteFields. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (QuoteFields) Values() []QuoteFields {
+ return []QuoteFields{
+ "ALWAYS",
+ "ASNEEDED",
+ }
+}
+
+type ReplicaModificationsStatus string
+
+// Enum values for ReplicaModificationsStatus
+const (
+ ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled"
+ ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicaModificationsStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus {
+ return []ReplicaModificationsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ReplicationRuleStatus string
+
+// Enum values for ReplicationRuleStatus
+const (
+ ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled"
+ ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicationRuleStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationRuleStatus) Values() []ReplicationRuleStatus {
+ return []ReplicationRuleStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ReplicationStatus string
+
+// Enum values for ReplicationStatus
+const (
+ ReplicationStatusComplete ReplicationStatus = "COMPLETE"
+ ReplicationStatusPending ReplicationStatus = "PENDING"
+ ReplicationStatusFailed ReplicationStatus = "FAILED"
+ ReplicationStatusReplica ReplicationStatus = "REPLICA"
+)
+
+// Values returns all known values for ReplicationStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationStatus) Values() []ReplicationStatus {
+ return []ReplicationStatus{
+ "COMPLETE",
+ "PENDING",
+ "FAILED",
+ "REPLICA",
+ }
+}
+
+type ReplicationTimeStatus string
+
+// Enum values for ReplicationTimeStatus
+const (
+ ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled"
+ ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicationTimeStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationTimeStatus) Values() []ReplicationTimeStatus {
+ return []ReplicationTimeStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type RequestCharged string
+
+// Enum values for RequestCharged
+const (
+ RequestChargedRequester RequestCharged = "requester"
+)
+
+// Values returns all known values for RequestCharged. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (RequestCharged) Values() []RequestCharged {
+ return []RequestCharged{
+ "requester",
+ }
+}
+
+type RequestPayer string
+
+// Enum values for RequestPayer
+const (
+ RequestPayerRequester RequestPayer = "requester"
+)
+
+// Values returns all known values for RequestPayer. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (RequestPayer) Values() []RequestPayer {
+ return []RequestPayer{
+ "requester",
+ }
+}
+
+type RestoreRequestType string
+
+// Enum values for RestoreRequestType
+const (
+ RestoreRequestTypeSelect RestoreRequestType = "SELECT"
+)
+
+// Values returns all known values for RestoreRequestType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (RestoreRequestType) Values() []RestoreRequestType {
+ return []RestoreRequestType{
+ "SELECT",
+ }
+}
+
+type ServerSideEncryption string
+
+// Enum values for ServerSideEncryption
+const (
+ ServerSideEncryptionAes256 ServerSideEncryption = "AES256"
+ ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms"
+)
+
+// Values returns all known values for ServerSideEncryption. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ServerSideEncryption) Values() []ServerSideEncryption {
+ return []ServerSideEncryption{
+ "AES256",
+ "aws:kms",
+ }
+}
+
+type SseKmsEncryptedObjectsStatus string
+
+// Enum values for SseKmsEncryptedObjectsStatus
+const (
+ SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled"
+ SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled"
+)
+
+// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus {
+ return []SseKmsEncryptedObjectsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type StorageClass string
+
+// Enum values for StorageClass
+const (
+ StorageClassStandard StorageClass = "STANDARD"
+ StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY"
+ StorageClassStandardIa StorageClass = "STANDARD_IA"
+ StorageClassOnezoneIa StorageClass = "ONEZONE_IA"
+ StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING"
+ StorageClassGlacier StorageClass = "GLACIER"
+ StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE"
+ StorageClassOutposts StorageClass = "OUTPOSTS"
+)
+
+// Values returns all known values for StorageClass. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (StorageClass) Values() []StorageClass {
+ return []StorageClass{
+ "STANDARD",
+ "REDUCED_REDUNDANCY",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "GLACIER",
+ "DEEP_ARCHIVE",
+ "OUTPOSTS",
+ }
+}
+
+type StorageClassAnalysisSchemaVersion string
+
+// Enum values for StorageClassAnalysisSchemaVersion
+const (
+ StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1"
+)
+
+// Values returns all known values for StorageClassAnalysisSchemaVersion. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client. The ordering of this slice is not guaranteed to be stable across
+// updates.
+func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion {
+ return []StorageClassAnalysisSchemaVersion{
+ "V_1",
+ }
+}
+
+type TaggingDirective string
+
+// Enum values for TaggingDirective
+const (
+ TaggingDirectiveCopy TaggingDirective = "COPY"
+ TaggingDirectiveReplace TaggingDirective = "REPLACE"
+)
+
+// Values returns all known values for TaggingDirective. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (TaggingDirective) Values() []TaggingDirective {
+ return []TaggingDirective{
+ "COPY",
+ "REPLACE",
+ }
+}
+
+type Tier string
+
+// Enum values for Tier
+const (
+ TierStandard Tier = "Standard"
+ TierBulk Tier = "Bulk"
+ TierExpedited Tier = "Expedited"
+)
+
+// Values returns all known values for Tier. Note that this can be expanded in the
+// future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (Tier) Values() []Tier {
+ return []Tier{
+ "Standard",
+ "Bulk",
+ "Expedited",
+ }
+}
+
+type TransitionStorageClass string
+
+// Enum values for TransitionStorageClass
+const (
+ TransitionStorageClassGlacier TransitionStorageClass = "GLACIER"
+ TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA"
+ TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA"
+ TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING"
+ TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE"
+)
+
+// Values returns all known values for TransitionStorageClass. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (TransitionStorageClass) Values() []TransitionStorageClass {
+ return []TransitionStorageClass{
+ "GLACIER",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "DEEP_ARCHIVE",
+ }
+}
+
+type Type string
+
+// Enum values for Type
+const (
+ TypeCanonicalUser Type = "CanonicalUser"
+ TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail"
+ TypeGroup Type = "Group"
+)
+
+// Values returns all known values for Type. Note that this can be expanded in the
+// future, and so it is only as up to date as the client. The ordering of this
+// slice is not guaranteed to be stable across updates.
+func (Type) Values() []Type {
+ return []Type{
+ "CanonicalUser",
+ "AmazonCustomerByEmail",
+ "Group",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
new file mode 100644
index 000000000..57c599ec3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
@@ -0,0 +1,170 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// The requested bucket name is not available. The bucket namespace is shared by
+// all users of the system. Select a different name and try again.
+type BucketAlreadyExists struct {
+ Message *string
+}
+
+func (e *BucketAlreadyExists) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BucketAlreadyExists) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BucketAlreadyExists) ErrorCode() string { return "BucketAlreadyExists" }
+func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The bucket you tried to create already exists, and you own it. Amazon S3 returns
+// this error in all AWS Regions except in the North Virginia Region. For legacy
+// compatibility, if you re-create an existing bucket that you already own in the
+// North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+// control lists (ACLs).
+type BucketAlreadyOwnedByYou struct {
+ Message *string
+}
+
+func (e *BucketAlreadyOwnedByYou) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BucketAlreadyOwnedByYou) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BucketAlreadyOwnedByYou) ErrorCode() string { return "BucketAlreadyOwnedByYou" }
+func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Object is archived and inaccessible until restored.
+type InvalidObjectState struct {
+ Message *string
+
+ StorageClass StorageClass
+ AccessTier IntelligentTieringAccessTier
+}
+
+func (e *InvalidObjectState) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidObjectState) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidObjectState) ErrorCode() string { return "InvalidObjectState" }
+func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified bucket does not exist.
+type NoSuchBucket struct {
+ Message *string
+}
+
+func (e *NoSuchBucket) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchBucket) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchBucket) ErrorCode() string { return "NoSuchBucket" }
+func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified key does not exist.
+type NoSuchKey struct {
+ Message *string
+}
+
+func (e *NoSuchKey) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchKey) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchKey) ErrorCode() string { return "NoSuchKey" }
+func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified multipart upload does not exist.
+type NoSuchUpload struct {
+ Message *string
+}
+
+func (e *NoSuchUpload) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchUpload) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchUpload) ErrorCode() string { return "NoSuchUpload" }
+func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified content does not exist.
+type NotFound struct {
+ Message *string
+}
+
+func (e *NotFound) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NotFound) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NotFound) ErrorCode() string { return "NotFound" }
+func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// This operation is not allowed against this storage tier.
+type ObjectAlreadyInActiveTierError struct {
+ Message *string
+}
+
+func (e *ObjectAlreadyInActiveTierError) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { return "ObjectAlreadyInActiveTierError" }
+func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The source object of the COPY operation is not in the active tier and is only
+// stored in Amazon S3 Glacier.
+type ObjectNotInActiveTierError struct {
+ Message *string
+}
+
+func (e *ObjectNotInActiveTierError) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ObjectNotInActiveTierError) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ObjectNotInActiveTierError) ErrorCode() string { return "ObjectNotInActiveTierError" }
+func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
new file mode 100644
index 000000000..fca2d4c04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
@@ -0,0 +1,3081 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "time"
+)
+
+// Specifies the days since the initiation of an incomplete multipart upload that
+// Amazon S3 will wait before permanently removing all parts of the upload. For
+// more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon Simple Storage Service Developer Guide.
+type AbortIncompleteMultipartUpload struct {
+
+ // Specifies the number of days after which Amazon S3 aborts an incomplete
+ // multipart upload.
+ DaysAfterInitiation int32
+}
+
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see Amazon S3 Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in
+// the Amazon Simple Storage Service Developer Guide.
+type AccelerateConfiguration struct {
+
+ // Specifies the transfer acceleration status of the bucket.
+ Status BucketAccelerateStatus
+}
+
+// Contains the elements that set the ACL permissions for an object per grantee.
+type AccessControlPolicy struct {
+
+ // A list of grants.
+ Grants []Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner
+}
+
+// A container for information about access control for replicas.
+type AccessControlTranslation struct {
+
+ // Specifies the replica ownership. For default and valid values, see PUT bucket
+ // replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // in the Amazon Simple Storage Service API Reference.
+ //
+ // This member is required.
+ Owner OwnerOverride
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a metrics
+// filter. The operator must have at least two predicates in any combination, and
+// an object must match all of the predicates for the filter to apply.
+type AnalyticsAndOperator struct {
+
+ // The prefix to use when evaluating an AND predicate: The prefix that an object
+ // must have to be included in the metrics results.
+ Prefix *string
+
+ // The list of tags to use when evaluating an AND predicate.
+ Tags []Tag
+}
+
+// Specifies the configuration and any analyses for the analytics filter of an
+// Amazon S3 bucket.
+type AnalyticsConfiguration struct {
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Contains data related to access patterns to be collected and made available to
+ // analyze the tradeoffs between different storage classes.
+ //
+ // This member is required.
+ StorageClassAnalysis *StorageClassAnalysis
+
+ // The filter used to describe a set of objects for analyses. A filter must have
+ // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no
+ // filter is provided, all objects will be considered in any analysis.
+ Filter AnalyticsFilter
+}
+
+// Where to publish the analytics results.
+type AnalyticsExportDestination struct {
+
+ // A destination signifying output to an S3 bucket.
+ //
+ // This member is required.
+ S3BucketDestination *AnalyticsS3BucketDestination
+}
+
+// The filter used to describe a set of objects for analyses. A filter must have
+// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no
+// filter is provided, all objects will be considered in any analysis.
+//
+// The following types satisfy this interface:
+// AnalyticsFilterMemberPrefix
+// AnalyticsFilterMemberTag
+// AnalyticsFilterMemberAnd
+type AnalyticsFilter interface {
+ isAnalyticsFilter()
+}
+
+// The prefix to use when evaluating an analytics filter.
+type AnalyticsFilterMemberPrefix struct {
+ Value string
+}
+
+func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {}
+
+// The tag to use when evaluating an analytics filter.
+type AnalyticsFilterMemberTag struct {
+ Value Tag
+}
+
+func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates.
+type AnalyticsFilterMemberAnd struct {
+ Value AnalyticsAndOperator
+}
+
+func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {}
+
+// Contains information about where to publish the analytics results.
+type AnalyticsS3BucketDestination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the file format used when exporting data to Amazon S3.
+ //
+ // This member is required.
+ Format AnalyticsS3ExportFileFormat
+
+ // The account ID that owns the destination S3 bucket. If no account ID is
+ // provided, the owner is not validated before exporting data. Although this value
+ // is optional, we strongly recommend that you set it to help prevent problems if
+ // the destination bucket ownership changes.
+ BucketAccountId *string
+
+ // The prefix to use when exporting data. The prefix is prepended to all results.
+ Prefix *string
+}
+
+// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is
+// globally unique, and the namespace is shared by all AWS accounts.
+type Bucket struct {
+
+ // Date the bucket was created. This date can change when making changes to your
+ // bucket, such as editing its bucket policy.
+ CreationDate *time.Time
+
+ // The name of the bucket.
+ Name *string
+}
+
+// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For
+// more information, see Object Lifecycle Management
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in
+// the Amazon Simple Storage Service Developer Guide.
+type BucketLifecycleConfiguration struct {
+
+ // A lifecycle rule for individual objects in an Amazon S3 bucket.
+ //
+ // This member is required.
+ Rules []LifecycleRule
+}
+
+// Container for logging status information.
+type BucketLoggingStatus struct {
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to all log
+ // object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in
+ // the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *LoggingEnabled
+}
+
+// Container for all (if there are any) keys between Prefix and the next occurrence
+// of the string specified by a delimiter. CommonPrefixes lists keys that act like
+// subdirectories in the directory specified by Prefix. For example, if the prefix
+// is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common
+// prefix is notes/summer/.
+type CommonPrefix struct {
+
+ // Container for the specified common prefix.
+ Prefix *string
+}
+
+// The container for the completed multipart upload details.
+type CompletedMultipartUpload struct {
+
+ // Array of CompletedPart data types.
+ Parts []CompletedPart
+}
+
+// Details of the parts that were uploaded.
+type CompletedPart struct {
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string
+
+ // Part number that identifies the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber int32
+}
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+// redirect request to another host where you might process the error.
+type Condition struct {
+
+ // The HTTP error code when the redirect is applied. In the event of an error, if
+ // the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect request for all pages with the prefix docs/, the key prefix will be
+ // /docs, which identifies all objects in the docs/ folder. Required when the
+ // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is
+ // not specified. If both conditions are specified, both must be true for the
+ // redirect to be applied.
+ KeyPrefixEquals *string
+}
+
+// Container for all response elements.
+type CopyObjectResult struct {
+
+ // Returns the ETag of the new object. The ETag reflects only changes to the
+ // contents of an object, not its metadata. The source and destination ETag is
+ // identical for a successfully copied object.
+ ETag *string
+
+ // Returns the date that the object was last modified.
+ LastModified *time.Time
+}
+
+// Container for all response elements.
+type CopyPartResult struct {
+
+ // Entity tag of the object.
+ ETag *string
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time
+}
+
+// Describes the cross-origin access configuration for objects in an Amazon S3
+// bucket. For more information, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple
+// Storage Service Developer Guide.
+type CORSConfiguration struct {
+
+ // A set of origins and methods (cross-origin access that you want to allow). You
+ // can add up to 100 rules to the configuration.
+ //
+ // This member is required.
+ CORSRules []CORSRule
+}
+
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
+type CORSRule struct {
+
+ // An HTTP method that you allow the origin to execute. Valid values are GET, PUT,
+ // HEAD, POST, and DELETE.
+ //
+ // This member is required.
+ AllowedMethods []string
+
+ // One or more origins you want customers to be able to access the bucket from.
+ //
+ // This member is required.
+ AllowedOrigins []string
+
+ // Headers that are specified in the Access-Control-Request-Headers header. These
+ // headers are allowed in a preflight OPTIONS request. In response to any preflight
+ // OPTIONS request, Amazon S3 returns any requested headers that are allowed.
+ AllowedHeaders []string
+
+ // One or more headers in the response that you want customers to be able to access
+ // from their applications (for example, from a JavaScript XMLHttpRequest object).
+ ExposeHeaders []string
+
+ // The time in seconds that your browser is to cache the preflight response for the
+ // specified resource.
+ MaxAgeSeconds int32
+}
+
+// The configuration information for the bucket.
+type CreateBucketConfiguration struct {
+
+ // Specifies the Region where the bucket will be created. If you don't specify a
+ // Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
+ LocationConstraint BucketLocationConstraint
+}
+
+// Describes how an uncompressed comma-separated values (CSV)-formatted input
+// object is formatted.
+type CSVInput struct {
+
+ // Specifies that CSV field values may contain quoted record delimiters and such
+ // records should be allowed. Default value is FALSE. Setting this value to TRUE
+ // may lower performance.
+ AllowQuotedRecordDelimiter bool
+
+ // A single character used to indicate that a row should be ignored when the
+ // character is present at the start of that row. You can specify any character to
+ // indicate a comment line.
+ Comments *string
+
+ // A single character used to separate individual fields in a record. You can
+ // specify an arbitrary delimiter.
+ FieldDelimiter *string
+
+ // Describes the first line of input. Valid values are:
+ //
+ // * NONE: First line is not
+ // a header.
+ //
+ // * IGNORE: First line is a header, but you can't use the header values
+ // to indicate the column in an expression. You can use column position (such as
+ // _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
+ //
+ // * Use: First
+ // line is a header, and you can use the header value to identify a column in an
+ // expression (SELECT "name" FROM OBJECT).
+ FileHeaderInfo FileHeaderInfo
+
+ // A single character used for escaping when the field delimiter is part of the
+ // value. For example, if the value is a, b, Amazon S3 wraps this field value in
+ // quotation marks, as follows: " a , b ". Type: String Default: " Ancestors: CSV
+ QuoteCharacter *string
+
+ // A single character used for escaping the quotation mark character inside an
+ // already escaped value. For example, the value """ a , b """ is parsed as " a , b
+ // ".
+ QuoteEscapeCharacter *string
+
+ // A single character used to separate individual records in the input. Instead of
+ // the default value, you can specify an arbitrary delimiter.
+ RecordDelimiter *string
+}
+
+// Describes how uncompressed comma-separated values (CSV)-formatted results are
+// formatted.
+type CSVOutput struct {
+
+ // The value used to separate individual fields in a record. You can specify an
+ // arbitrary delimiter.
+ FieldDelimiter *string
+
+ // A single character used for escaping when the field delimiter is part of the
+ // value. For example, if the value is a, b, Amazon S3 wraps this field value in
+ // quotation marks, as follows: " a , b ".
+ QuoteCharacter *string
+
+ // The single character used for escaping the quote character inside an already
+ // escaped value.
+ QuoteEscapeCharacter *string
+
+ // Indicates whether to use quotation marks around output fields.
+ //
+ // * ALWAYS: Always
+ // use quotation marks for output fields.
+ //
+ // * ASNEEDED: Use quotation marks for
+ // output fields when needed.
+ QuoteFields QuoteFields
+
+ // A single character used to separate individual records in the output. Instead of
+ // the default value, you can specify an arbitrary delimiter.
+ RecordDelimiter *string
+}
+
+// The container element for specifying the default Object Lock retention settings
+// for new objects placed in the specified bucket.
+type DefaultRetention struct {
+
+ // The number of days that you want to specify for the default retention period.
+ Days int32
+
+ // The default Object Lock retention mode you want to apply to new objects placed
+ // in the specified bucket.
+ Mode ObjectLockRetentionMode
+
+ // The number of years that you want to specify for the default retention period.
+ Years int32
+}
+
+// Container for the objects to delete.
+type Delete struct {
+
+ // The objects to delete.
+ //
+ // This member is required.
+ Objects []ObjectIdentifier
+
+ // Element to enable quiet mode for the request. When you add this element, you
+ // must set its value to true.
+ Quiet bool
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker. In a simple DELETE, this header indicates
+ // whether (true) or not (false) a delete marker was created.
+ DeleteMarker bool
+
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header is
+ // the version ID of the object version deleted.
+ DeleteMarkerVersionId *string
+
+ // The name of the deleted object.
+ Key *string
+
+ // The version ID of the deleted object.
+ VersionId *string
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest bool
+
+ // The object key.
+ Key *string
+
+ // Date and time the object was last modified.
+ LastModified *time.Time
+
+ // The account that created the delete marker.
+ Owner *Owner
+
+ // Version ID of an object.
+ VersionId *string
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a
+// DeleteMarkerReplication element. If your Filter includes a Tag element, the
+// DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+// not support replicating delete markers for tag-based rules. For an example
+// configuration, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+// For more information about delete marker replication, see Basic Rule
+// Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see
+// Backward Compatibility
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+
+ // Indicates whether to replicate delete markers.
+ //
+ Status DeleteMarkerReplicationStatus
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // the results.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specify this only in a cross-account scenario (where source and destination
+ // bucket owners are not the same), and you want to change replica ownership to the
+ // AWS account that owns the destination bucket. If this is not specified in the
+ // replication configuration, the replicas are owned by same AWS account that owns
+ // the source object.
+ AccessControlTranslation *AccessControlTranslation
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you direct
+ // Amazon S3 to change replica ownership to the AWS account that owns the
+ // destination bucket by specifying the AccessControlTranslation property, this is
+ // the account ID of the destination bucket owner. For more information, see
+ // Replication Additional Configuration: Changing the Replica Owner
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Account *string
+
+ // A container that provides information about encryption. If
+ // SourceSelectionCriteria is specified, you must specify this element.
+ EncryptionConfiguration *EncryptionConfiguration
+
+ // A container specifying replication metrics-related settings enabling replication
+ // metrics and events.
+ Metrics *Metrics
+
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // S3 RTC is enabled and the time when all objects and operations on objects must
+ // be replicated. Must be specified together with a Metrics block.
+ ReplicationTime *ReplicationTime
+
+ // The storage class to use when replicating objects, such as S3 Standard or
+ // reduced redundancy. By default, Amazon S3 uses the storage class of the source
+ // object to create the object replica. For valid values, see the StorageClass
+ // element of the PUT Bucket replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // action in the Amazon Simple Storage Service API Reference.
+ StorageClass StorageClass
+}
+
+// Contains the type of server-side encryption used.
+type Encryption struct {
+
+ // The server-side encryption algorithm used when storing job results in Amazon S3
+ // (for example, AES256, aws:kms).
+ //
+ // This member is required.
+ EncryptionType ServerSideEncryption
+
+ // If the encryption type is aws:kms, this optional value can be used to specify
+ // the encryption context for the restore results.
+ KMSContext *string
+
+ // If the encryption type is aws:kms, this optional value specifies the ID of the
+ // symmetric customer managed AWS KMS CMK to use for encryption of job results.
+ // Amazon S3 only supports symmetric CMKs. For more information, see Using
+ // Symmetric and Asymmetric Keys
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
+ KMSKeyId *string
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is a
+// destination for replicated objects.
+type EncryptionConfiguration struct {
+
+ // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer master
+ // key (CMK) stored in AWS Key Management Service (KMS) for the destination bucket.
+ // Amazon S3 uses this key to encrypt replica objects. Amazon S3 only supports
+ // symmetric customer managed CMKs. For more information, see Using Symmetric and
+ // Asymmetric Keys
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
+ ReplicaKmsKeyID *string
+}
+
+// Container for all error elements.
+type Error struct {
+
+ // The error code is a string that uniquely identifies an error condition. It is
+ // meant to be read and understood by programs that detect and handle errors by
+ // type. Amazon S3 error codes
+ //
+ // * Code: AccessDenied
+ //
+ // * Description: Access
+ // Denied
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: AccountProblem
+ //
+ // * Description: There is a problem with your AWS account
+ // that prevents the operation from completing successfully. Contact AWS Support
+ // for further assistance.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: AllAccessDisabled
+ //
+ // * Description: All access to this
+ // Amazon S3 resource has been disabled. Contact AWS Support for further
+ // assistance.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: AmbiguousGrantByEmailAddress
+ //
+ // * Description: The email address
+ // you provided is associated with more than one account.
+ //
+ // * HTTP Status Code: 400
+ // Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // AuthorizationHeaderMalformed
+ //
+ // * Description: The authorization header you
+ // provided is invalid.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * HTTP Status Code:
+ // N/A
+ //
+ // * Code: BadDigest
+ //
+ // * Description: The Content-MD5 you specified did not
+ // match what we received.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: BucketAlreadyExists
+ //
+ // * Description: The requested bucket
+ // name is not available. The bucket namespace is shared by all users of the
+ // system. Please select a different name and try again.
+ //
+ // * HTTP Status Code: 409
+ // Conflict
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: BucketAlreadyOwnedByYou
+ //
+ // *
+ // Description: The bucket you tried to create already exists, and you own it.
+ // Amazon S3 returns this error in all AWS Regions except in the North Virginia
+ // Region. For legacy compatibility, if you re-create an existing bucket that you
+ // already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
+ // the bucket access control lists (ACLs).
+ //
+ // * Code: 409 Conflict (in all Regions
+ // except the North Virginia Region)
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // BucketNotEmpty
+ //
+ // * Description: The bucket you tried to delete is not empty.
+ //
+ // *
+ // HTTP Status Code: 409 Conflict
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // CredentialsNotSupported
+ //
+ // * Description: This request does not support
+ // credentials.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: CrossLocationLoggingProhibited
+ //
+ // * Description: Cross-location
+ // logging not allowed. Buckets in one geographic location cannot log information
+ // to a bucket in another location.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: EntityTooSmall
+ //
+ // * Description: Your proposed
+ // upload is smaller than the minimum allowed object size.
+ //
+ // * HTTP Status Code: 400
+ // Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: EntityTooLarge
+ //
+ // *
+ // Description: Your proposed upload exceeds the maximum allowed object size.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // ExpiredToken
+ //
+ // * Description: The provided token has expired.
+ //
+ // * HTTP Status
+ // Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // IllegalVersioningConfigurationException
+ //
+ // * Description: Indicates that the
+ // versioning configuration specified in the request is invalid.
+ //
+ // * HTTP Status
+ // Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // IncompleteBody
+ //
+ // * Description: You did not provide the number of bytes specified
+ // by the Content-Length HTTP header
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: IncorrectNumberOfFilesInPostRequest
+ //
+ // *
+ // Description: POST requires exactly one file upload per request.
+ //
+ // * HTTP Status
+ // Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InlineDataTooLarge
+ //
+ // * Description: Inline data exceeds the maximum allowed
+ // size.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: InternalError
+ //
+ // * Description: We encountered an internal error. Please try
+ // again.
+ //
+ // * HTTP Status Code: 500 Internal Server Error
+ //
+ // * SOAP Fault Code Prefix:
+ // Server
+ //
+ // * Code: InvalidAccessKeyId
+ //
+ // * Description: The AWS access key ID you
+ // provided does not exist in our records.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidAddressingHeader
+ //
+ // * Description:
+ // You must specify the Anonymous role.
+ //
+ // * HTTP Status Code: N/A
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidArgument
+ //
+ // * Description: Invalid Argument
+ //
+ // * HTTP
+ // Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidBucketName
+ //
+ // * Description: The specified bucket is not valid.
+ //
+ // * HTTP
+ // Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidBucketState
+ //
+ // * Description: The request is not valid with the current
+ // state of the bucket.
+ //
+ // * HTTP Status Code: 409 Conflict
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidDigest
+ //
+ // * Description: The Content-MD5 you
+ // specified is not valid.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidEncryptionAlgorithmError
+ //
+ // * Description: The
+ // encryption request you specified is not valid. The valid value is AES256.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidLocationConstraint
+ //
+ // * Description: The specified location constraint is
+ // not valid. For more information about Regions, see How to Select a Region for
+ // Your Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidObjectState
+ //
+ // * Description: The operation is not valid for the current
+ // state of the object.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidPart
+ //
+ // * Description: One or more of the specified
+ // parts could not be found. The part might not have been uploaded, or the
+ // specified entity tag might not have matched the part's entity tag.
+ //
+ // * HTTP
+ // Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidPartOrder
+ //
+ // * Description: The list of parts was not in ascending order.
+ // Parts list must be specified in order by part number.
+ //
+ // * HTTP Status Code: 400
+ // Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPayer
+ //
+ // *
+ // Description: All access to this object has been disabled. Please contact AWS
+ // Support for further assistance.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidPolicyDocument
+ //
+ // * Description: The content
+ // of the form does not meet the conditions specified in the policy document.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidRange
+ //
+ // * Description: The requested range cannot be satisfied.
+ //
+ // * HTTP
+ // Status Code: 416 Requested Range Not Satisfiable
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidRequest
+ //
+ // * Description: Please use AWS4-HMAC-SHA256.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code: InvalidRequest
+ //
+ // *
+ // Description: SOAP requests must be made over an HTTPS connection.
+ //
+ // * HTTP Status
+ // Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidRequest
+ //
+ // * Description: Amazon S3 Transfer Acceleration is not supported
+ // for buckets with non-DNS compliant names.
+ //
+ // * HTTP Status Code: 400 Bad
+ // Request
+ //
+ // * Code: N/A
+ //
+ // * Code: InvalidRequest
+ //
+ // * Description: Amazon S3 Transfer
+ // Acceleration is not supported for buckets with periods (.) in their names.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code: InvalidRequest
+ //
+ // *
+ // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style
+ // requests.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code:
+ // InvalidRequest
+ //
+ // * Description: Amazon S3 Transfer Accelerate is not configured
+ // on this bucket.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code:
+ // InvalidRequest
+ //
+ // * Description: Amazon S3 Transfer Accelerate is disabled on this
+ // bucket.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code:
+ // InvalidRequest
+ //
+ // * Description: Amazon S3 Transfer Acceleration is not supported
+ // on this bucket. Contact AWS Support for more information.
+ //
+ // * HTTP Status Code:
+ // 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code: InvalidRequest
+ //
+ // * Description: Amazon S3
+ // Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for
+ // more information.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * Code: N/A
+ //
+ // * Code:
+ // InvalidSecurity
+ //
+ // * Description: The provided security credentials are not
+ // valid.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: InvalidSOAPRequest
+ //
+ // * Description: The SOAP request body is invalid.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // InvalidStorageClass
+ //
+ // * Description: The storage class you specified is not
+ // valid.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: InvalidTargetBucketForLogging
+ //
+ // * Description: The target bucket for
+ // logging does not exist, is not owned by you, or does not have the appropriate
+ // grants for the log-delivery group.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: InvalidToken
+ //
+ // * Description: The provided
+ // token is malformed or otherwise invalid.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidURI
+ //
+ // * Description: Couldn't
+ // parse the specified URI.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: KeyTooLongError
+ //
+ // * Description: Your key is too long.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // MalformedACLError
+ //
+ // * Description: The XML you provided was not well-formed or
+ // did not validate against our published schema.
+ //
+ // * HTTP Status Code: 400 Bad
+ // Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedPOSTRequest
+ //
+ // *
+ // Description: The body of your POST request is not well-formed
+ // multipart/form-data.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: MalformedXML
+ //
+ // * Description: This happens when the user
+ // sends malformed XML (XML that doesn't conform to the published XSD) for the
+ // configuration. The error message is, "The XML you provided was not well-formed
+ // or did not validate against our published schema."
+ //
+ // * HTTP Status Code: 400 Bad
+ // Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxMessageLengthExceeded
+ //
+ // *
+ // Description: Your request was too big.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxPostPreDataLengthExceededError
+ //
+ // *
+ // Description: Your POST request fields preceding the upload file were too
+ // large.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: MetadataTooLarge
+ //
+ // * Description: Your metadata headers exceed the maximum
+ // allowed metadata size.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: MethodNotAllowed
+ //
+ // * Description: The specified method is
+ // not allowed against this resource.
+ //
+ // * HTTP Status Code: 405 Method Not
+ // Allowed
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: MissingAttachment
+ //
+ // *
+ // Description: A SOAP attachment was expected, but none were found.
+ //
+ // * HTTP Status
+ // Code: N/A
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: MissingContentLength
+ //
+ // *
+ // Description: You must provide the Content-Length HTTP header.
+ //
+ // * HTTP Status
+ // Code: 411 Length Required
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // MissingRequestBodyError
+ //
+ // * Description: This happens when the user sends an
+ // empty XML document as a request. The error message is, "Request body is
+ // empty."
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: MissingSecurityElement
+ //
+ // * Description: The SOAP 1.1 request is
+ // missing a security element.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: MissingSecurityHeader
+ //
+ // * Description: Your request
+ // is missing a required header.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: NoLoggingStatusForKey
+ //
+ // * Description: There is no
+ // such thing as a logging status subresource for a key.
+ //
+ // * HTTP Status Code: 400
+ // Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucket
+ //
+ // *
+ // Description: The specified bucket does not exist.
+ //
+ // * HTTP Status Code: 404 Not
+ // Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucketPolicy
+ //
+ // *
+ // Description: The specified bucket does not have a bucket policy.
+ //
+ // * HTTP Status
+ // Code: 404 Not Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchKey
+ //
+ // *
+ // Description: The specified key does not exist.
+ //
+ // * HTTP Status Code: 404 Not
+ // Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchLifecycleConfiguration
+ //
+ // *
+ // Description: The lifecycle configuration does not exist.
+ //
+ // * HTTP Status Code:
+ // 404 Not Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchUpload
+ //
+ // *
+ // Description: The specified multipart upload does not exist. The upload ID might
+ // be invalid, or the multipart upload might have been aborted or completed.
+ //
+ // *
+ // HTTP Status Code: 404 Not Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // NoSuchVersion
+ //
+ // * Description: Indicates that the version ID specified in the
+ // request does not match an existing version.
+ //
+ // * HTTP Status Code: 404 Not
+ // Found
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: NotImplemented
+ //
+ // * Description:
+ // A header you provided implies functionality that is not implemented.
+ //
+ // * HTTP
+ // Status Code: 501 Not Implemented
+ //
+ // * SOAP Fault Code Prefix: Server
+ //
+ // * Code:
+ // NotSignedUp
+ //
+ // * Description: Your account is not signed up for the Amazon S3
+ // service. You must sign up before you can use Amazon S3. You can sign up at the
+ // following URL: https://aws.amazon.com/s3
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: OperationAborted
+ //
+ // * Description: A
+ // conflicting conditional operation is currently in progress against this
+ // resource. Try again.
+ //
+ // * HTTP Status Code: 409 Conflict
+ //
+ // * SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: PermanentRedirect
+ //
+ // * Description: The bucket you are
+ // attempting to access must be addressed using the specified endpoint. Send all
+ // future requests to this endpoint.
+ //
+ // * HTTP Status Code: 301 Moved Permanently
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: PreconditionFailed
+ //
+ // * Description: At
+ // least one of the preconditions you specified did not hold.
+ //
+ // * HTTP Status Code:
+ // 412 Precondition Failed
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: Redirect
+ //
+ // *
+ // Description: Temporary redirect.
+ //
+ // * HTTP Status Code: 307 Moved Temporarily
+ //
+ // *
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: RestoreAlreadyInProgress
+ //
+ // * Description:
+ // Object restore is already in progress.
+ //
+ // * HTTP Status Code: 409 Conflict
+ //
+ // * SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: RequestIsNotMultiPartContent
+ //
+ // * Description:
+ // Bucket POST must be of the enclosure-type multipart/form-data.
+ //
+ // * HTTP Status
+ // Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // RequestTimeout
+ //
+ // * Description: Your socket connection to the server was not read
+ // from or written to within the timeout period.
+ //
+ // * HTTP Status Code: 400 Bad
+ // Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTimeTooSkewed
+ //
+ // *
+ // Description: The difference between the request time and the server's time is
+ // too large.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: RequestTorrentOfBucketError
+ //
+ // * Description: Requesting the
+ // torrent file of a bucket is not permitted.
+ //
+ // * HTTP Status Code: 400 Bad
+ // Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code: SignatureDoesNotMatch
+ //
+ // *
+ // Description: The request signature we calculated does not match the signature
+ // you provided. Check your AWS secret access key and signing method. For more
+ // information, see REST Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and
+ // SOAP Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for
+ // details.
+ //
+ // * HTTP Status Code: 403 Forbidden
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // *
+ // Code: ServiceUnavailable
+ //
+ // * Description: Reduce your request rate.
+ //
+ // * HTTP
+ // Status Code: 503 Service Unavailable
+ //
+ // * SOAP Fault Code Prefix: Server
+ //
+ // * Code:
+ // SlowDown
+ //
+ // * Description: Reduce your request rate.
+ //
+ // * HTTP Status Code: 503 Slow
+ // Down
+ //
+ // * SOAP Fault Code Prefix: Server
+ //
+ // * Code: TemporaryRedirect
+ //
+ // *
+ // Description: You are being redirected to the bucket while DNS updates.
+ //
+ // * HTTP
+ // Status Code: 307 Moved Temporarily
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // TokenRefreshRequired
+ //
+ // * Description: The provided token must be refreshed.
+ //
+ // *
+ // HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // TooManyBuckets
+ //
+ // * Description: You have attempted to create more buckets than
+ // allowed.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: UnexpectedContent
+ //
+ // * Description: This request does not support
+ // content.
+ //
+ // * HTTP Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: UnresolvableGrantByEmailAddress
+ //
+ // * Description: The email
+ // address you provided does not match any account on record.
+ //
+ // * HTTP Status Code:
+ // 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ //
+ // * Code:
+ // UserKeyMustBeSpecified
+ //
+ // * Description: The bucket POST must contain the
+ // specified field name. If it is specified, check the order of the fields.
+ //
+ // * HTTP
+ // Status Code: 400 Bad Request
+ //
+ // * SOAP Fault Code Prefix: Client
+ Code *string
+
+ // The error key.
+ Key *string
+
+ // The error message contains a generic description of the error condition in
+ // English. It is intended for a human audience. Simple programs display the
+ // message directly to the end user if they encounter an error condition they don't
+ // know how or don't care to handle. Sophisticated programs with more exhaustive
+ // error handling and proper internationalization are more likely to ignore the
+ // error message.
+ Message *string
+
+ // The version ID of the error.
+ VersionId *string
+}
+
+// The error information.
+type ErrorDocument struct {
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // This member is required.
+ Key *string
+}
+
+// Optional configuration to replicate existing source bucket objects. For more
+// information, see Replicating Existing Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+// in the Amazon S3 Developer Guide.
+type ExistingObjectReplication struct {
+
+	// Status of the ExistingObjectReplication configuration.
+ //
+ // This member is required.
+ Status ExistingObjectReplicationStatus
+}
+
+// Specifies the Amazon S3 object key name to filter on and whether to filter on
+// the suffix or prefix of the key name.
+type FilterRule struct {
+
+ // The object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+ // prefixes and suffixes are not supported. For more information, see Configuring
+ // Event Notifications
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Name FilterRuleName
+
+ // The value that the filter searches for in object key names.
+ Value *string
+}
+
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // This member is required.
+ Tier Tier
+}
+
+// Container for grant information.
+type Grant struct {
+
+ // The person being granted permissions.
+ Grantee *Grantee
+
+ // Specifies the permission given to the grantee.
+ Permission Permission
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+
+ // Type of grantee
+ //
+ // This member is required.
+ Type Type
+
+ // Screen name of the grantee.
+ DisplayName *string
+
+ // Email address of the grantee. Using email addresses to specify a grantee is only
+ // supported in the following AWS Regions:
+ //
+ // * US East (N. Virginia)
+ //
+ // * US West (N.
+ // California)
+ //
+ // * US West (Oregon)
+ //
+ // * Asia Pacific (Singapore)
+ //
+ // * Asia Pacific
+ // (Sydney)
+ //
+ // * Asia Pacific (Tokyo)
+ //
+ // * Europe (Ireland)
+ //
+ // * South America (São
+ // Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see
+ // Regions and Endpoints
+ // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS
+ // General Reference.
+ EmailAddress *string
+
+ // The canonical user ID of the grantee.
+ ID *string
+
+ // URI of the grantee group.
+ URI *string
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+
+ // A suffix that is appended to a request that is for a directory on the website
+	// endpoint (for example, if the suffix is index.html and you make a request to
+ // samplebucket/images/ the data that is returned will be for the object with the
+ // key name images/index.html) The suffix must not be empty and must not include a
+ // slash character.
+ //
+ // This member is required.
+ Suffix *string
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+
+ // Name of the Principal.
+ DisplayName *string
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If the
+ // principal is an IAM User, it provides a user ARN value.
+ ID *string
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput
+
+ // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
+ // Value: NONE.
+ CompressionType CompressionType
+
+ // Specifies JSON as object's input serialization format.
+ JSON *JSONInput
+
+ // Specifies Parquet as object's input serialization format.
+ Parquet *ParquetInput
+}
+
+// A container for specifying S3 Intelligent-Tiering filters. The filters determine
+// the subset of objects to which the rule applies.
+type IntelligentTieringAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // configuration applies.
+ Prefix *string
+
+ // All of these tags must exist in the object's tag set in order for the
+ // configuration to apply.
+ Tags []Tag
+}
+
+// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For
+// information about the S3 Intelligent-Tiering storage class, see Storage class
+// for automatically optimizing frequently and infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+type IntelligentTieringConfiguration struct {
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the status of the configuration.
+ //
+ // This member is required.
+ Status IntelligentTieringStatus
+
+ // Specifies the S3 Intelligent-Tiering storage class tier of the configuration.
+ //
+ // This member is required.
+ Tierings []Tiering
+
+ // Specifies a bucket filter. The configuration only includes objects that meet the
+ // filter's criteria.
+ Filter *IntelligentTieringFilter
+}
+
+// The Filter is used to identify objects that the S3 Intelligent-Tiering
+// configuration applies to.
+type IntelligentTieringFilter struct {
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating a metrics
+ // filter. The operator must have at least two predicates, and an object must match
+ // all of the predicates in order for the filter to apply.
+ And *IntelligentTieringAndOperator
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ Prefix *string
+
+ // A container of a key value name pair.
+ Tag *Tag
+}
+
+// Specifies the inventory configuration for an Amazon S3 bucket. For more
+// information, see GET Bucket inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// in the Amazon Simple Storage Service API Reference.
+type InventoryConfiguration struct {
+
+ // Contains information about where to publish the inventory results.
+ //
+ // This member is required.
+ Destination *InventoryDestination
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Object versions to include in the inventory list. If set to All, the list
+ // includes all the object versions, which adds the version-related fields
+ // VersionId, IsLatest, and DeleteMarker to the list. If set to Current, the list
+ // does not contain these version-related fields.
+ //
+ // This member is required.
+ IncludedObjectVersions InventoryIncludedObjectVersions
+
+ // Specifies whether the inventory is enabled or disabled. If set to True, an
+ // inventory list is generated. If set to False, no inventory list is generated.
+ //
+ // This member is required.
+ IsEnabled bool
+
+ // Specifies the schedule for generating inventory results.
+ //
+ // This member is required.
+ Schedule *InventorySchedule
+
+ // Specifies an inventory filter. The inventory only includes objects that meet the
+ // filter's criteria.
+ Filter *InventoryFilter
+
+ // Contains the optional fields that are included in the inventory results.
+ OptionalFields []InventoryOptionalField
+}
+
+// Specifies the inventory configuration for an Amazon S3 bucket.
+type InventoryDestination struct {
+
+ // Contains the bucket name, file format, bucket owner (optional), and prefix
+ // (optional) where inventory results are published.
+ //
+ // This member is required.
+ S3BucketDestination *InventoryS3BucketDestination
+}
+
+// Contains the type of server-side encryption used to encrypt the inventory
+// results.
+type InventoryEncryption struct {
+
+ // Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+ SSEKMS *SSEKMS
+
+ // Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+ SSES3 *SSES3
+}
+
+// Specifies an inventory filter. The inventory only includes objects that meet the
+// filter's criteria.
+type InventoryFilter struct {
+
+ // The prefix that an object must have to be included in the inventory results.
+ //
+ // This member is required.
+ Prefix *string
+}
+
+// Contains the bucket name, file format, bucket owner (optional), and prefix
+// (optional) where inventory results are published.
+type InventoryS3BucketDestination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will be
+ // published.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the output format of the inventory results.
+ //
+ // This member is required.
+ Format InventoryFormat
+
+ // The account ID that owns the destination S3 bucket. If no account ID is
+ // provided, the owner is not validated before exporting data. Although this value
+ // is optional, we strongly recommend that you set it to help prevent problems if
+ // the destination bucket ownership changes.
+ AccountId *string
+
+ // Contains the type of server-side encryption used to encrypt the inventory
+ // results.
+ Encryption *InventoryEncryption
+
+ // The prefix that is prepended to all inventory results.
+ Prefix *string
+}
+
+// Specifies the schedule for generating inventory results.
+type InventorySchedule struct {
+
+ // Specifies how frequently inventory results are produced.
+ //
+ // This member is required.
+ Frequency InventoryFrequency
+}
+
+// Specifies JSON as object's input serialization format.
+type JSONInput struct {
+
+ // The type of JSON. Valid values: Document, Lines.
+ Type JSONType
+}
+
+// Specifies JSON as request's output serialization format.
+type JSONOutput struct {
+
+ // The value used to separate individual records in the output. If no value is
+ // specified, Amazon S3 uses a newline character ('\n').
+ RecordDelimiter *string
+}
+
+// A container for specifying the configuration for AWS Lambda notifications.
+type LambdaFunctionConfiguration struct {
+
+ // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more
+ // information, see Supported Event Types
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes
+ // when the specified event type occurs.
+ //
+ // This member is required.
+ LambdaFunctionArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+}
+
+// Container for the expiration for the lifecycle of the object.
+type LifecycleExpiration struct {
+
+ // Indicates at what date the object is to be moved or deleted. Should be in GMT
+ // ISO 8601 Format.
+ Date *time.Time
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days int32
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to false the
+ // policy takes no action. This cannot be specified with Days or Date in a
+ // Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker bool
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+type LifecycleRule struct {
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is
+ // not currently being applied.
+ //
+ // This member is required.
+ Status ExpirationStatus
+
+ // Specifies the days since the initiation of an incomplete multipart upload that
+ // Amazon S3 will wait before permanently removing all parts of the upload. For
+ // more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon Simple Storage Service Developer Guide.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload
+
+ // Specifies the expiration for the lifecycle of the object in the form of date,
+ // days and, whether the object has a delete marker.
+ Expiration *LifecycleExpiration
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to. A
+ // Filter must have exactly one of Prefix, Tag, or And specified.
+ Filter LifecycleRuleFilter
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3
+ // permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended) to
+ // request that Amazon S3 delete noncurrent object versions at a specific period in
+ // the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration
+
+ // Specifies the transition rule for the lifecycle rule that describes when
+ // noncurrent objects transition to a specific storage class. If your bucket is
+ // versioning-enabled (or versioning is suspended), you can set this action to
+ // request that Amazon S3 transition noncurrent object versions to a specific
+ // storage class at a set period in the object's lifetime.
+ NoncurrentVersionTransitions []NoncurrentVersionTransition
+
+ // Prefix identifying one or more objects to which the rule applies. This is No
+ // longer used; use Filter instead.
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // Specifies when an Amazon S3 object transitions to a specified storage class.
+ Transitions []Transition
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+// predicates. The Lifecycle Rule will apply to any object matching all of the
+// predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string
+
+ // All of these tags must exist in the object's tag set in order for the rule to
+ // apply.
+ Tags []Tag
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to. A
+// Filter must have exactly one of Prefix, Tag, or And specified.
+//
+// The following types satisfy this interface:
+// LifecycleRuleFilterMemberPrefix
+// LifecycleRuleFilterMemberTag
+// LifecycleRuleFilterMemberAnd
+type LifecycleRuleFilter interface {
+ isLifecycleRuleFilter()
+}
+
+// Prefix identifying one or more objects to which the rule applies.
+type LifecycleRuleFilterMemberPrefix struct {
+ Value string
+}
+
+func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {}
+
+// This tag must exist in the object's tag set in order for the rule to apply.
+type LifecycleRuleFilterMemberTag struct {
+ Value Tag
+}
+
+func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+// predicates. The Lifecycle Rule will apply to any object matching all of the
+// predicates configured inside the And operator.
+type LifecycleRuleFilterMemberAnd struct {
+ Value LifecycleRuleAndOperator
+}
+
+func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {}
+
+// Describes where logs are stored and the prefix that Amazon S3 assigns to all log
+// object keys for a bucket. For more information, see PUT Bucket logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in
+// the Amazon Simple Storage Service API Reference.
+type LoggingEnabled struct {
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs. You
+ // can have your logs delivered to any bucket that you own, including the same
+ // bucket that is being logged. You can also configure multiple buckets to deliver
+ // their logs to the same target bucket. In this case, you should choose a
+ // different TargetPrefix for each source bucket so that the delivered log files
+ // can be distinguished by key.
+ //
+ // This member is required.
+ TargetBucket *string
+
+ // A prefix for all log object keys. If you store log files from multiple Amazon S3
+ // buckets in a single bucket, you can use a prefix to distinguish which log files
+ // came from which bucket.
+ //
+ // This member is required.
+ TargetPrefix *string
+
+ // Container for granting information.
+ TargetGrants []TargetGrant
+}
+
+// A metadata key-value pair to store with an object.
+type MetadataEntry struct {
+
+ // Name of the Object.
+ Name *string
+
+ // Value of the Object.
+ Value *string
+}
+
+// A container specifying replication metrics-related settings enabling replication
+// metrics and events.
+type Metrics struct {
+
+ // Specifies whether the replication metrics are enabled.
+ //
+ // This member is required.
+ Status MetricsStatus
+
+ // A container specifying the time threshold for emitting the
+ // s3:Replication:OperationMissedThreshold event.
+ EventThreshold *ReplicationTimeValue
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a metrics
+// filter. The operator must have at least two predicates, and an object must match
+// all of the predicates in order for the filter to apply.
+type MetricsAndOperator struct {
+
+ // The prefix used when evaluating an AND predicate.
+ Prefix *string
+
+ // The list of tags used when evaluating an AND predicate.
+ Tags []Tag
+}
+
+// Specifies a metrics configuration for the CloudWatch request metrics (specified
+// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an
+// existing metrics configuration, note that this is a full replacement of the
+// existing metrics configuration. If you don't include the elements you want to
+// keep, they are erased. For more information, see PUT Bucket metrics
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
+// in the Amazon Simple Storage Service API Reference.
+type MetricsConfiguration struct {
+
+ // The ID used to identify the metrics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies a metrics configuration filter. The metrics configuration will only
+ // include objects that meet the filter's criteria. A filter must be a prefix, a
+ // tag, or a conjunction (MetricsAndOperator).
+ Filter MetricsFilter
+}
+
+// Specifies a metrics configuration filter. The metrics configuration only
+// includes objects that meet the filter's criteria. A filter must be a prefix, a
+// tag, or a conjunction (MetricsAndOperator).
+//
+// The following types satisfy this interface:
+// MetricsFilterMemberPrefix
+// MetricsFilterMemberTag
+// MetricsFilterMemberAnd
+type MetricsFilter interface {
+ isMetricsFilter()
+}
+
+// The prefix used when evaluating a metrics filter.
+type MetricsFilterMemberPrefix struct {
+ Value string
+}
+
+func (*MetricsFilterMemberPrefix) isMetricsFilter() {}
+
+// The tag used when evaluating a metrics filter.
+type MetricsFilterMemberTag struct {
+ Value Tag
+}
+
+func (*MetricsFilterMemberTag) isMetricsFilter() {}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a metrics
+// filter. The operator must have at least two predicates, and an object must match
+// all of the predicates in order for the filter to apply.
+type MetricsFilterMemberAnd struct {
+ Value MetricsAndOperator
+}
+
+func (*MetricsFilterMemberAnd) isMetricsFilter() {}
+
+// Container for the MultipartUpload for the Amazon S3 object.
+type MultipartUpload struct {
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string
+
+ // Specifies the owner of the object that is part of the multipart upload.
+ Owner *Owner
+
+ // The class of storage used to store the object.
+ StorageClass StorageClass
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3
+// permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended) to
+// request that Amazon S3 delete noncurrent object versions at a specific period in
+// the object's lifetime.
+type NoncurrentVersionExpiration struct {
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // in the Amazon Simple Storage Service Developer Guide.
+ NoncurrentDays int32
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, or
+// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
+// is suspended), you can set this action to request that Amazon S3 transition
+// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
+// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
+// lifetime.
+type NoncurrentVersionTransition struct {
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates How Long an Object Has Been
+ // Noncurrent
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // in the Amazon Simple Storage Service Developer Guide.
+ NoncurrentDays int32
+
+ // The class of storage used to store the object.
+ StorageClass TransitionStorageClass
+}
+
+// A container for specifying the notification configuration of the bucket. If this
+// element is empty, notifications are turned off for the bucket.
+type NotificationConfiguration struct {
+
+ // Describes the AWS Lambda functions to invoke and the events for which to invoke
+ // them.
+ LambdaFunctionConfigurations []LambdaFunctionConfiguration
+
+ // The Amazon Simple Queue Service queues to publish messages to and the events for
+ // which to publish messages.
+ QueueConfigurations []QueueConfiguration
+
+ // The topic to which notifications are sent and the events for which notifications
+ // are generated.
+ TopicConfigurations []TopicConfiguration
+}
+
+// Specifies object key name filtering rules. For information about key name
+// filtering, see Configuring Event Notifications
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+// Amazon Simple Storage Service Developer Guide.
+type NotificationConfigurationFilter struct {
+
+ // A container for object key name prefix and suffix filtering rules.
+ Key *S3KeyFilter
+}
+
+// An object consists of data and its descriptive metadata.
+type Object struct {
+
+ // The entity tag is a hash of the object. The ETag reflects changes only to the
+ // contents of an object, not its metadata. The ETag may or may not be an MD5
+ // digest of the object data. Whether or not it is depends on how the object was
+ // created and how it is encrypted as described below:
+ //
+ // * Objects created by the
+ // PUT Object, POST Object, or Copy operation, or through the AWS Management
+ // Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5
+ // digest of their object data.
+ //
+ // * Objects created by the PUT Object, POST Object,
+ // or Copy operation, or through the AWS Management Console, and are encrypted by
+ // SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data.
+ //
+ // *
+ // If an object is created by either the Multipart Upload or Part Copy operation,
+ // the ETag is not an MD5 digest, regardless of the method of encryption.
+ ETag *string
+
+ // The name that you assign to an object. You use the object key to retrieve the
+ // object.
+ Key *string
+
+ // The date the Object was Last Modified
+ LastModified *time.Time
+
+ // The owner of the object
+ Owner *Owner
+
+ // Size in bytes of the object
+ Size int64
+
+ // The class of storage used to store the object.
+ StorageClass ObjectStorageClass
+}
+
+// Object Identifier is unique value to identify objects.
+type ObjectIdentifier struct {
+
+ // Key name of the object to delete.
+ //
+ // This member is required.
+ Key *string
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+
+ // Indicates whether this bucket has an Object Lock configuration enabled.
+ ObjectLockEnabled ObjectLockEnabled
+
+ // The Object Lock rule in place for the specified object.
+ Rule *ObjectLockRule
+}
+
+// A Legal Hold configuration for an object.
+type ObjectLockLegalHold struct {
+
+ // Indicates whether the specified object has a Legal Hold in place.
+ Status ObjectLockLegalHoldStatus
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+
+ // Indicates the Retention mode for the specified object.
+ Mode ObjectLockRetentionMode
+
+ // The date on which this Object Lock Retention will expire.
+ RetainUntilDate *time.Time
+}
+
+// The container element for an Object Lock rule.
+type ObjectLockRule struct {
+
+ // The default retention period that you want to apply to new objects placed in the
+ // specified bucket.
+ DefaultRetention *DefaultRetention
+}
+
+// The version of an object.
+type ObjectVersion struct {
+
+ // The entity tag is an MD5 hash of that version of the object.
+ ETag *string
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest bool
+
+ // The object key.
+ Key *string
+
+ // Date and time the object was last modified.
+ LastModified *time.Time
+
+ // Specifies the owner of the object.
+ Owner *Owner
+
+ // Size in bytes of the object.
+ Size int64
+
+ // The class of storage used to store the object.
+ StorageClass ObjectVersionStorageClass
+
+ // Version ID of an object.
+ VersionId *string
+}
+
+// Describes the location where the restore job's output is stored.
+type OutputLocation struct {
+
+ // Describes an S3 location that will receive the results of the restore request.
+ S3 *S3Location
+}
+
+// Describes how results of the Select job are serialized.
+type OutputSerialization struct {
+
+ // Describes the serialization of CSV-encoded Select results.
+ CSV *CSVOutput
+
+ // Specifies JSON as request's output serialization format.
+ JSON *JSONOutput
+}
+
+// Container for the owner's display name and ID.
+type Owner struct {
+
+ // Container for the display name of the owner.
+ DisplayName *string
+
+ // Container for the ID of the owner.
+ ID *string
+}
+
+// The container element for a bucket's ownership controls.
+type OwnershipControls struct {
+
+ // The container element for an ownership control rule.
+ //
+ // This member is required.
+ Rules []OwnershipControlsRule
+}
+
+// The container element for an ownership control rule.
+type OwnershipControlsRule struct {
+
+ // The container element for object ownership for a bucket's ownership controls.
+ // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
+ // bucket owner if the objects are uploaded with the bucket-owner-full-control
+ // canned ACL. ObjectWriter - The uploading account will own the object if the
+ // object is uploaded with the bucket-owner-full-control canned ACL.
+ //
+ // This member is required.
+ ObjectOwnership ObjectOwnership
+}
+
+// Container for Parquet.
+type ParquetInput struct {
+}
+
+// Container for elements related to a part.
+type Part struct {
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber int32
+
+ // Size in bytes of the uploaded part data.
+ Size int64
+}
+
+// The container element for a bucket's policy status.
+type PolicyStatus struct {
+
+ // The policy status for this bucket. TRUE indicates that this bucket is public.
+ // FALSE indicates that the bucket is not public.
+ IsPublic bool
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon S3
+// bucket. You can enable the configuration options in any combination. For more
+// information about when Amazon S3 considers a bucket or object public, see The
+// Meaning of "Public"
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon Simple Storage Service Developer Guide.
+type PublicAccessBlockConfiguration struct {
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs) for
+ // this bucket and objects in this bucket. Setting this element to TRUE causes the
+ // following behavior:
+ //
+ // * PUT Bucket acl and PUT Object acl calls fail if the
+ // specified ACL is public.
+ //
+ // * PUT Object calls fail if the request includes a
+ // public ACL.
+ //
+ // * PUT Bucket calls fail if the request includes a public
+ // ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls bool
+
+ // Specifies whether Amazon S3 should block public bucket policies for this bucket.
+ // Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket
+ // policy if the specified bucket policy allows public access. Enabling this
+ // setting doesn't affect existing bucket policies.
+ BlockPublicPolicy bool
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore
+ // all public ACLs on this bucket and objects in this bucket. Enabling this setting
+ // doesn't affect the persistence of any existing ACLs and doesn't prevent new
+ // public ACLs from being set.
+ IgnorePublicAcls bool
+
+ // Specifies whether Amazon S3 should restrict public bucket policies for this
+ // bucket. Setting this element to TRUE restricts access to this bucket to only AWS
+ // service principals and authorized users within this account if the bucket has a
+ // public policy. Enabling this setting doesn't affect previously stored bucket
+ // policies, except that public and cross-account access within any public bucket
+ // policy, including non-public delegation to specific accounts, is blocked.
+ RestrictPublicBuckets bool
+}
+
+// Specifies the configuration for publishing messages to an Amazon Simple Queue
+// Service (Amazon SQS) queue when Amazon S3 detects specified events.
+type QueueConfiguration struct {
+
+ // A collection of bucket events for which to send notifications
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // This member is required.
+ QueueArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+}
+
+// Specifies how requests are redirected. In the event of an error, you can specify
+// a different error code to return.
+type Redirect struct {
+
+ // The host name to use in the redirect request.
+ HostName *string
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string
+
+ // Protocol to use when redirecting requests. The default is the protocol that is
+ // used in the original request.
+ Protocol Protocol
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/ and
+ // in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of
+ // the siblings is present. Can be present only if ReplaceKeyWith is not provided.
+ ReplaceKeyPrefixWith *string
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html. Not required if one of the siblings is present. Can be
+ // present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string
+}
+
+// Specifies the redirect behavior of all requests to a website endpoint of an
+// Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+
+ // Name of the host where requests are redirected.
+ //
+ // This member is required.
+ HostName *string
+
+ // Protocol to use when redirecting requests. The default is the protocol that is
+ // used in the original request.
+ Protocol Protocol
+}
+
+// A filter that you can specify for selection for modifications on replicas.
+// Amazon S3 doesn't replicate replica modifications by default. In the latest
+// version of replication configuration (when Filter is specified), you can specify
+// this element and set the status to Enabled to replicate modifications on
+// replicas. If you don't specify the Filter element, Amazon S3 assumes that the
+// replication configuration is the earlier version, V1. In the earlier version,
+// this element is not allowed.
+type ReplicaModifications struct {
+
+ // Specifies whether Amazon S3 replicates modifications on replicas.
+ //
+ // This member is required.
+ Status ReplicaModificationsStatus
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+
+ // The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM)
+ // role that Amazon S3 assumes when replicating objects. For more information, see
+ // How to Set Up Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Role *string
+
+ // A container for one or more replication rules. A replication configuration must
+ // have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // This member is required.
+ Rules []ReplicationRule
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+
+ // A container for information about the replication destination and its
+ // configurations including enabling the S3 Replication Time Control (S3 RTC).
+ //
+ // This member is required.
+ Destination *Destination
+
+ // Specifies whether the rule is enabled.
+ //
+ // This member is required.
+ Status ReplicationRuleStatus
+
+ // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+ // in your replication configuration, you must also include a
+ // DeleteMarkerReplication element. If your Filter includes a Tag element, the
+ // DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+ // not support replicating delete markers for tag-based rules. For an example
+ // configuration, see Basic Rule Configuration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ // For more information about delete marker replication, see Basic Rule
+ // Configuration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+ // If you are using an earlier version of the replication configuration, Amazon S3
+ // handles replication of delete markers differently. For more information, see
+ // Backward Compatibility
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+ DeleteMarkerReplication *DeleteMarkerReplication
+
+ //
+ ExistingObjectReplication *ExistingObjectReplication
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+ Filter ReplicationRuleFilter
+
+ // A unique identifier for the rule. The maximum value is 255 characters.
+ ID *string
+
+ // An object key name prefix that identifies the object or objects to which the
+ // rule applies. The maximum prefix length is 1,024 characters. To include all
+ // objects in a bucket, specify an empty string.
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // The priority indicates which rule has precedence whenever two or more
+ // replication rules conflict. Amazon S3 will attempt to replicate objects
+ // according to all replication rules. However, if there are two or more rules with
+ // the same destination bucket, then objects will be replicated according to the
+ // rule with the highest priority. The higher the number, the higher the priority.
+ // For more information, see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+ // Simple Storage Service Developer Guide.
+ Priority int32
+
+ // A container that describes additional filters for identifying the source objects
+ // that you want to replicate. You can choose to enable or disable the replication
+ // of these objects. Currently, Amazon S3 supports only the filter that you can
+ // specify for objects created with server-side encryption using a customer master
+ // key (CMK) stored in AWS Key Management Service (SSE-KMS).
+ SourceSelectionCriteria *SourceSelectionCriteria
+}
+
+// A container for specifying rule filters. The filters determine the subset of
+// objects to which the rule applies. This element is required only if you specify
+// more than one filter. For example:
+//
+// * If you specify both a Prefix and a Tag
+// filter, wrap these filters in an And tag.
+//
+// * If you specify a filter based on
+// multiple tags, wrap the Tag elements in an And tag
+type ReplicationRuleAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ Prefix *string
+
+ // An array of tags containing key and value pairs.
+ Tags []Tag
+}
+
+// A filter that identifies the subset of objects to which the replication rule
+// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+//
+// The following types satisfy this interface:
+// ReplicationRuleFilterMemberPrefix
+// ReplicationRuleFilterMemberTag
+// ReplicationRuleFilterMemberAnd
+type ReplicationRuleFilter interface {
+ isReplicationRuleFilter()
+}
+
+// An object key name prefix that identifies the subset of objects to which the
+// rule applies.
+type ReplicationRuleFilterMemberPrefix struct {
+ Value string
+}
+
+func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {}
+
+// A container for specifying a tag key and value. The rule applies only to objects
+// that have the tag in their tag set.
+type ReplicationRuleFilterMemberTag struct {
+ Value Tag
+}
+
+func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {}
+
+// A container for specifying rule filters. The filters determine the subset of
+// objects to which the rule applies. This element is required only if you specify
+// more than one filter. For example:
+//
+// * If you specify both a Prefix and a Tag
+// filter, wrap these filters in an And tag.
+//
+// * If you specify a filter based on
+// multiple tags, wrap the Tag elements in an And tag.
+type ReplicationRuleFilterMemberAnd struct {
+ Value ReplicationRuleAndOperator
+}
+
+func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {}
+
+// A container specifying S3 Replication Time Control (S3 RTC) related information,
+// including whether S3 RTC is enabled and the time when all objects and operations
+// on objects must be replicated. Must be specified together with a Metrics block.
+type ReplicationTime struct {
+
+ // Specifies whether the replication time is enabled.
+ //
+ // This member is required.
+ Status ReplicationTimeStatus
+
+ // A container specifying the time by which replication should be complete for all
+ // objects and operations on objects.
+ //
+ // This member is required.
+ Time *ReplicationTimeValue
+}
+
+// A container specifying the time value for S3 Replication Time Control (S3 RTC)
+// and replication metrics EventThreshold.
+type ReplicationTimeValue struct {
+
+ // Contains an integer specifying time in minutes. Valid values: 15 minutes.
+ Minutes int32
+}
+
+// Container for Payer.
+type RequestPaymentConfiguration struct {
+
+ // Specifies who pays for the download and request fees.
+ //
+ // This member is required.
+ Payer Payer
+}
+
+// Container for restore job parameters.
+type RestoreRequest struct {
+
+ // Lifetime of the active copy in days. Do not use with restores that specify
+ // OutputLocation. The Days element is required for regular restores, and must not
+ // be provided for select requests.
+ Days int32
+
+ // The optional description for the job.
+ Description *string
+
+ // S3 Glacier related parameters pertaining to this job. Do not use with restores
+ // that specify OutputLocation.
+ GlacierJobParameters *GlacierJobParameters
+
+ // Describes the location where the restore job's output is stored.
+ OutputLocation *OutputLocation
+
+ // Describes the parameters for Select job types.
+ SelectParameters *SelectParameters
+
+ // Retrieval tier at which the restore will be processed.
+ Tier Tier
+
+ // Type of restore request.
+ Type RestoreRequestType
+}
+
+// Specifies the redirect behavior and when a redirect is applied. For more
+// information about routing rules, see Configuring advanced conditional redirects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)
+// in the Amazon Simple Storage Service Developer Guide.
+type RoutingRule struct {
+
+ // Container for redirect information. You can redirect requests to another host,
+ // to another page, or with another protocol. In the event of an error, you can
+ // specify a different error code to return.
+ //
+ // This member is required.
+ Redirect *Redirect
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example, 1. If request is for pages in the /docs folder,
+ // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+ // redirect request to another host where you might process the error.
+ Condition *Condition
+}
+
+// A container for object key name prefix and suffix filtering rules.
+type S3KeyFilter struct {
+
+ // A list of containers for the key-value pair that defines the criteria for the
+ // filter rule.
+ FilterRules []FilterRule
+}
+
+// Describes an Amazon S3 location that will receive the results of the restore
+// request.
+type S3Location struct {
+
+ // The name of the bucket where the restore results will be placed.
+ //
+ // This member is required.
+ BucketName *string
+
+ // The prefix that is prepended to the restore results for this request.
+ //
+ // This member is required.
+ Prefix *string
+
+ // A list of grants that control access to the staged results.
+ AccessControlList []Grant
+
+ // The canned ACL to apply to the restore results.
+ CannedACL ObjectCannedACL
+
+ // Contains the type of server-side encryption used.
+ Encryption *Encryption
+
+ // The class of storage used to store the restore results.
+ StorageClass StorageClass
+
+ // The tag-set that is applied to the restore results.
+ Tagging *Tagging
+
+ // A list of metadata to store with the restore results in S3.
+ UserMetadata []MetadataEntry
+}
+
+// Describes the parameters for Select job types.
+type SelectParameters struct {
+
+ // The expression that is used to query the object.
+ //
+ // This member is required.
+ Expression *string
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // This member is required.
+ ExpressionType ExpressionType
+
+ // Describes the serialization format of the object.
+ //
+ // This member is required.
+ InputSerialization *InputSerialization
+
+ // Describes how the results of the Select job are serialized.
+ //
+ // This member is required.
+ OutputSerialization *OutputSerialization
+}
+
+// Describes the default server-side encryption to apply to new objects in the
+// bucket. If a PUT Object request doesn't specify any server-side encryption, this
+// default encryption will be applied. For more information, see PUT Bucket
+// encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)
+// in the Amazon Simple Storage Service API Reference.
+type ServerSideEncryptionByDefault struct {
+
+ // Server-side encryption algorithm to use for the default encryption.
+ //
+ // This member is required.
+ SSEAlgorithm ServerSideEncryption
+
+ // AWS Key Management Service (KMS) customer master key ID to use for the default
+ // encryption. This parameter is allowed if and only if SSEAlgorithm is set to
+ // aws:kms. You can specify the key ID or the Amazon Resource Name (ARN) of the
+ // CMK. However, if you are using encryption with cross-account operations, you
+ // must use a fully qualified CMK ARN. For more information, see Using encryption
+ // for cross-account operations
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy).
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN:
+ // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // Amazon
+ // S3 only supports symmetric CMKs and not asymmetric CMKs. For more information,
+ // see Using Symmetric and Asymmetric Keys
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
+ KMSMasterKeyID *string
+}
+
+// Specifies the default server-side-encryption configuration.
+type ServerSideEncryptionConfiguration struct {
+
+ // Container for information about a particular server-side encryption
+ // configuration rule.
+ //
+ // This member is required.
+ Rules []ServerSideEncryptionRule
+}
+
+// Specifies the default server-side encryption configuration.
+type ServerSideEncryptionRule struct {
+
+ // Specifies the default server-side encryption to apply to new objects in the
+ // bucket. If a PUT Object request doesn't specify any server-side encryption, this
+ // default encryption will be applied.
+ ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side
+ // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects
+ // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3
+ // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more
+ // information, see Amazon S3 Bucket Keys
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+ // Simple Storage Service Developer Guide.
+ BucketKeyEnabled bool
+}
+
+// A container that describes additional filters for identifying the source objects
+// that you want to replicate. You can choose to enable or disable the replication
+// of these objects. Currently, Amazon S3 supports only the filter that you can
+// specify for objects created with server-side encryption using a customer master
+// key (CMK) stored in AWS Key Management Service (SSE-KMS).
+type SourceSelectionCriteria struct {
+
+ // A filter that you can specify for selections for modifications on replicas.
+ // Amazon S3 doesn't replicate replica modifications by default. In the latest
+ // version of replication configuration (when Filter is specified), you can specify
+ // this element and set the status to Enabled to replicate modifications on
+ // replicas. If you don't specify the Filter element, Amazon S3 assumes that the
+ // replication configuration is the earlier version, V1. In the earlier version,
+ // this element is not allowed
+ ReplicaModifications *ReplicaModifications
+
+ // A container for filter information for the selection of Amazon S3 objects
+ // encrypted with AWS KMS. If you include SourceSelectionCriteria in the
+ // replication configuration, this element is required.
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects
+}
+
+// Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+type SSEKMS struct {
+
+ // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer
+ // managed customer master key (CMK) to use for encrypting inventory reports.
+ //
+ // This member is required.
+ KeyId *string
+}
+
+// A container for filter information for the selection of S3 objects encrypted
+// with AWS KMS.
+type SseKmsEncryptedObjects struct {
+
+ // Specifies whether Amazon S3 replicates objects created with server-side
+ // encryption using a customer master key (CMK) stored in AWS Key Management
+ // Service.
+ //
+ // This member is required.
+ Status SseKmsEncryptedObjectsStatus
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+type SSES3 struct {
+}
+
+// Specifies data related to access patterns to be collected and made available to
+// analyze the tradeoffs between different storage classes for an Amazon S3 bucket.
+type StorageClassAnalysis struct {
+
+ // Specifies how data related to the storage class analysis for an Amazon S3 bucket
+ // should be exported.
+ DataExport *StorageClassAnalysisDataExport
+}
+
+// Container for data related to the storage class analysis for an Amazon S3 bucket
+// for export.
+type StorageClassAnalysisDataExport struct {
+
+ // The place to store the data for an analysis.
+ //
+ // This member is required.
+ Destination *AnalyticsExportDestination
+
+ // The version of the output schema to use when exporting data. Must be V_1.
+ //
+ // This member is required.
+ OutputSchemaVersion StorageClassAnalysisSchemaVersion
+}
+
+// A container of a key value name pair.
+type Tag struct {
+
+ // Name of the object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Value of the tag.
+ //
+ // This member is required.
+ Value *string
+}
+
+// Container for TagSet elements.
+type Tagging struct {
+
+ // A collection for a set of tags
+ //
+ // This member is required.
+ TagSet []Tag
+}
+
+// Container for granting information.
+type TargetGrant struct {
+
+ // Container for the person being granted permissions.
+ Grantee *Grantee
+
+ // Logging permissions assigned to the grantee for the bucket.
+ Permission BucketLogsPermission
+}
+
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead.
+type Tiering struct {
+
+ // S3 Intelligent-Tiering access tier. See Storage class for automatically
+ // optimizing frequently and infrequently accessed objects
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
+ // for a list of access tiers in the S3 Intelligent-Tiering storage class.
+ //
+ // This member is required.
+ AccessTier IntelligentTieringAccessTier
+
+ // The number of consecutive days of no access after which an object will be
+ // eligible to be transitioned to the corresponding tier. The minimum number of
+ // days specified for Archive Access tier must be at least 90 days and Deep Archive
+ // Access tier must be at least 180 days. The maximum can be up to 2 years (730
+ // days).
+ //
+ // This member is required.
+ Days int32
+}
+
+// A container for specifying the configuration for publication of messages to an
+// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects
+// specified events.
+type TopicConfiguration struct {
+
+ // The Amazon S3 bucket event about which to send notifications. For more
+ // information, see Supported Event Types
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // This member is required.
+ TopicArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+}
+
+// Specifies when an object transitions to a specified storage class. For more
+// information about Amazon S3 lifecycle configuration rules, see Transitioning
+// Objects Using Amazon S3 Lifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
+// in the Amazon Simple Storage Service Developer Guide.
+type Transition struct {
+
+ // Indicates when objects are transitioned to the specified storage class. The date
+ // value must be in ISO 8601 format. The time is always midnight UTC.
+ Date *time.Time
+
+ // Indicates the number of days after creation when objects are transitioned to the
+ // specified storage class. The value must be a positive integer.
+ Days int32
+
+ // The storage class to which you want the object to transition.
+ StorageClass TransitionStorageClass
+}
+
+// Describes the versioning state of an Amazon S3 bucket. For more information, see
+// PUT Bucket versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon Simple Storage Service API Reference.
+type VersioningConfiguration struct {
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA delete.
+ // If the bucket has never been so configured, this element is not returned.
+ MFADelete MFADelete
+
+ // The versioning state of the bucket.
+ Status BucketVersioningStatus
+}
+
+// Specifies website configuration parameters for an Amazon S3 bucket.
+type WebsiteConfiguration struct {
+
+ // The name of the error document for the website.
+ ErrorDocument *ErrorDocument
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument
+
+ // The redirect behavior for every request to this bucket's website endpoint. If
+ // you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []RoutingRule
+}
+
+// UnknownUnionMember is returned when a union member is returned over the wire,
+// but has an unknown tag.
+type UnknownUnionMember struct {
+ Tag string
+ Value []byte
+}
+
+func (*UnknownUnionMember) isAnalyticsFilter() {}
+func (*UnknownUnionMember) isLifecycleRuleFilter() {}
+func (*UnknownUnionMember) isMetricsFilter() {}
+func (*UnknownUnionMember) isReplicationRuleFilter() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go
new file mode 100644
index 000000000..c48199ca9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go
@@ -0,0 +1,5353 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpAbortMultipartUpload struct {
+}
+
+func (*validateOpAbortMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AbortMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAbortMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCompleteMultipartUpload struct {
+}
+
+func (*validateOpCompleteMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CompleteMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCompleteMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCopyObject struct {
+}
+
+func (*validateOpCopyObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CopyObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCopyObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateBucket struct {
+}
+
+func (*validateOpCreateBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateMultipartUpload struct {
+}
+
+func (*validateOpCreateMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketCors struct {
+}
+
+func (*validateOpDeleteBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketEncryption struct {
+}
+
+func (*validateOpDeleteBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucket struct {
+}
+
+func (*validateOpDeleteBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketInventoryConfiguration struct {
+}
+
+func (*validateOpDeleteBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketLifecycle struct {
+}
+
+func (*validateOpDeleteBucketLifecycle) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketLifecycleInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketLifecycleInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketMetricsConfiguration struct {
+}
+
+func (*validateOpDeleteBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketOwnershipControls struct {
+}
+
+func (*validateOpDeleteBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketPolicy struct {
+}
+
+func (*validateOpDeleteBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketReplication struct {
+}
+
+func (*validateOpDeleteBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketTagging struct {
+}
+
+func (*validateOpDeleteBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketWebsite struct {
+}
+
+func (*validateOpDeleteBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObject struct {
+}
+
+func (*validateOpDeleteObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObjects struct {
+}
+
+func (*validateOpDeleteObjects) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObjectTagging struct {
+}
+
+func (*validateOpDeleteObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeletePublicAccessBlock struct {
+}
+
+func (*validateOpDeletePublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeletePublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeletePublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*validateOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAcl struct {
+}
+
+func (*validateOpGetBucketAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketCors struct {
+}
+
+func (*validateOpGetBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketEncryption struct {
+}
+
+func (*validateOpGetBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketInventoryConfiguration struct {
+}
+
+func (*validateOpGetBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*validateOpGetBucketLifecycleConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLocation struct {
+}
+
+func (*validateOpGetBucketLocation) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLocationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLocationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLogging struct {
+}
+
+func (*validateOpGetBucketLogging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLoggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLoggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketMetricsConfiguration struct {
+}
+
+func (*validateOpGetBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketNotificationConfiguration struct {
+}
+
+func (*validateOpGetBucketNotificationConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketOwnershipControls struct {
+}
+
+func (*validateOpGetBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketPolicy struct {
+}
+
+func (*validateOpGetBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketPolicyStatus struct {
+}
+
+func (*validateOpGetBucketPolicyStatus) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketPolicyStatusInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketPolicyStatusInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketReplication struct {
+}
+
+func (*validateOpGetBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketRequestPayment struct {
+}
+
+func (*validateOpGetBucketRequestPayment) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketRequestPaymentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketRequestPaymentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketTagging struct {
+}
+
+func (*validateOpGetBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketVersioning struct {
+}
+
+func (*validateOpGetBucketVersioning) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketVersioningInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketVersioningInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketWebsite struct {
+}
+
+func (*validateOpGetBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectAcl struct {
+}
+
+func (*validateOpGetObjectAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObject struct {
+}
+
+func (*validateOpGetObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectLegalHold struct {
+}
+
+func (*validateOpGetObjectLegalHold) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectLegalHoldInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectLegalHoldInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectLockConfiguration struct {
+}
+
+func (*validateOpGetObjectLockConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectLockConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectLockConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectRetention struct {
+}
+
+func (*validateOpGetObjectRetention) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectRetentionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectRetentionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectTagging struct {
+}
+
+func (*validateOpGetObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectTorrent struct {
+}
+
+func (*validateOpGetObjectTorrent) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectTorrentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectTorrentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetPublicAccessBlock struct {
+}
+
+func (*validateOpGetPublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetPublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetPublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpHeadBucket struct {
+}
+
+func (*validateOpHeadBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*HeadBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpHeadBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpHeadObject struct {
+}
+
+func (*validateOpHeadObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*HeadObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpHeadObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketAnalyticsConfigurations struct {
+}
+
+func (*validateOpListBucketAnalyticsConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketIntelligentTieringConfigurations struct {
+}
+
+func (*validateOpListBucketIntelligentTieringConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketInventoryConfigurations struct {
+}
+
+func (*validateOpListBucketInventoryConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketMetricsConfigurations struct {
+}
+
+func (*validateOpListBucketMetricsConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListMultipartUploads struct {
+}
+
+func (*validateOpListMultipartUploads) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListMultipartUploadsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListMultipartUploadsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjects struct {
+}
+
+func (*validateOpListObjects) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjectsV2 struct {
+}
+
+func (*validateOpListObjectsV2) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectsV2Input)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectsV2Input(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjectVersions struct {
+}
+
+func (*validateOpListObjectVersions) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectVersionsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectVersionsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListParts struct {
+}
+
+func (*validateOpListParts) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListPartsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListPartsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAccelerateConfiguration struct {
+}
+
+func (*validateOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAcl struct {
+}
+
+func (*validateOpPutBucketAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketCors struct {
+}
+
+func (*validateOpPutBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketEncryption struct {
+}
+
+func (*validateOpPutBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketInventoryConfiguration struct {
+}
+
+func (*validateOpPutBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketLifecycleConfiguration struct {
+}
+
+func (*validateOpPutBucketLifecycleConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketLogging struct {
+}
+
+func (*validateOpPutBucketLogging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketLoggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketLoggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketMetricsConfiguration struct {
+}
+
+func (*validateOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketNotificationConfiguration struct {
+}
+
+func (*validateOpPutBucketNotificationConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketOwnershipControls struct {
+}
+
+func (*validateOpPutBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketPolicy struct {
+}
+
+func (*validateOpPutBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketReplication struct {
+}
+
+func (*validateOpPutBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketRequestPayment struct {
+}
+
+func (*validateOpPutBucketRequestPayment) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketRequestPaymentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketRequestPaymentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketTagging struct {
+}
+
+func (*validateOpPutBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketVersioning struct {
+}
+
+func (*validateOpPutBucketVersioning) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketVersioningInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketVersioningInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketWebsite struct {
+}
+
+func (*validateOpPutBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectAcl struct {
+}
+
+func (*validateOpPutObjectAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObject struct {
+}
+
+func (*validateOpPutObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectLegalHold struct {
+}
+
+func (*validateOpPutObjectLegalHold) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectLegalHoldInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectLegalHoldInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectLockConfiguration struct {
+}
+
+func (*validateOpPutObjectLockConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectLockConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectLockConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectRetention struct {
+}
+
+func (*validateOpPutObjectRetention) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectRetentionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectRetentionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectTagging struct {
+}
+
+func (*validateOpPutObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutPublicAccessBlock struct {
+}
+
+func (*validateOpPutPublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutPublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutPublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRestoreObject struct {
+}
+
+func (*validateOpRestoreObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*RestoreObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpRestoreObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpUploadPartCopy struct {
+}
+
+func (*validateOpUploadPartCopy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*UploadPartCopyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpUploadPartCopyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpUploadPart struct {
+}
+
+func (*validateOpUploadPart) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*UploadPartInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpUploadPartInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After)
+}
+
+func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After)
+}
+
+func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After)
+}
+
+func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After)
+}
+
+func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After)
+}
+
+// The addOp*ValidationMiddleware helpers each register one operation's input
+// validator on the middleware stack, ordered after the Initialize step so the
+// operation input is validated before the request is serialized and signed.
+// NOTE(review): smithy-codegen generated code (vendored AWS SDK for Go v2);
+// do not hand-edit — changes belong in the upstream code generator.
+func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After)
+}
+
+func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After)
+}
+
+func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After)
+}
+
+func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After)
+}
+
+func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After)
+}
+
+func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After)
+}
+
+func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After)
+}
+
+func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After)
+}
+
+func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After)
+}
+
+func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After)
+}
+
+func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After)
+}
+
+func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After)
+}
+
+func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After)
+}
+
+func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After)
+}
+
+func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After)
+}
+
+func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After)
+}
+
+func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After)
+}
+
+func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After)
+}
+
+func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After)
+}
+
+func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After)
+}
+
+func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After)
+}
+
+func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After)
+}
+
+func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After)
+}
+
+func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After)
+}
+
+func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After)
+}
+
+func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObject{}, middleware.After)
+}
+
+func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After)
+}
+
+func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After)
+}
+
+func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After)
+}
+
+func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After)
+}
+
+func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After)
+}
+
+func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After)
+}
+
+func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After)
+}
+
+func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After)
+}
+
+func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+}
+
+func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After)
+}
+
+func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After)
+}
+
+func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After)
+}
+
+func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjects{}, middleware.After)
+}
+
+func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After)
+}
+
+func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After)
+}
+
+func addOpListPartsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListParts{}, middleware.After)
+}
+
+func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After)
+}
+
+func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After)
+}
+
+func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After)
+}
+
+func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After)
+}
+
+func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After)
+}
+
+func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After)
+}
+
+func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After)
+}
+
+func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After)
+}
+
+func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After)
+}
+
+func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After)
+}
+
+func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After)
+}
+
+func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObject{}, middleware.After)
+}
+
+func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After)
+}
+
+func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After)
+}
+
+func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After)
+}
+
+func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After)
+}
+
+func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After)
+}
+
+func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After)
+}
+
+func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After)
+}
+
+// The validate* helpers below check required members on S3 shape types.
+// Each accepts a possibly-nil value: nil means the optional member is absent
+// and is treated as valid. Otherwise the helper accumulates every missing or
+// invalid member into a smithy.InvalidParamsError (nesting errors from child
+// shapes via AddNested) and returns it, or returns nil when everything is set.
+// NOTE(review): smithy-codegen generated code (vendored AWS SDK for Go v2).
+// The redundant `else if v.X != nil` guards and the `else { return nil }`
+// tails are codegen artifacts — leave them as-is; fixes go in the generator.
+func validateAccessControlPolicy(v *types.AccessControlPolicy) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"}
+ if v.Grants != nil {
+ if err := validateGrants(v.Grants); err != nil {
+ invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAccessControlTranslation(v *types.AccessControlTranslation) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"}
+ // len()==0 checks the enum's zero value, i.e. the member was never set.
+ if len(v.Owner) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Owner"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateAnalyticsFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.StorageClassAnalysis == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis"))
+ } else if v.StorageClassAnalysis != nil {
+ if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil {
+ invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"}
+ if v.S3BucketDestination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+ } else if v.S3BucketDestination != nil {
+ if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+// validateAnalyticsFilter dispatches on the union's concrete member type;
+// unknown/unhandled members fall through the switch as valid.
+func validateAnalyticsFilter(v types.AnalyticsFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"}
+ switch uv := v.(type) {
+ case *types.AnalyticsFilterMemberAnd:
+ if err := validateAnalyticsAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.AnalyticsFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"}
+ if len(v.Format) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Format"))
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateLifecycleRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"}
+ if v.LoggingEnabled != nil {
+ if err := validateLoggingEnabled(v.LoggingEnabled); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateCORSConfiguration(v *types.CORSConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"}
+ if v.CORSRules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CORSRules"))
+ } else if v.CORSRules != nil {
+ if err := validateCORSRules(v.CORSRules); err != nil {
+ invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateCORSRule(v *types.CORSRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSRule"}
+ if v.AllowedMethods == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods"))
+ }
+ if v.AllowedOrigins == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+// List validators index by position so nested errors read like "[3].Field".
+func validateCORSRules(v []types.CORSRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSRules"}
+ for i := range v {
+ if err := validateCORSRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateDelete(v *types.Delete) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Delete"}
+ if v.Objects == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Objects"))
+ } else if v.Objects != nil {
+ if err := validateObjectIdentifierList(v.Objects); err != nil {
+ invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateDestination(v *types.Destination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Destination"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.AccessControlTranslation != nil {
+ if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil {
+ invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicationTime != nil {
+ if err := validateReplicationTime(v.ReplicationTime); err != nil {
+ invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Metrics != nil {
+ if err := validateMetrics(v.Metrics); err != nil {
+ invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateEncryption(v *types.Encryption) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Encryption"}
+ if len(v.EncryptionType) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("EncryptionType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateErrorDocument(v *types.ErrorDocument) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateExistingObjectReplication(v *types.ExistingObjectReplication) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGlacierJobParameters(v *types.GlacierJobParameters) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"}
+ if len(v.Tier) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Tier"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrant(v *types.Grant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grant"}
+ if v.Grantee != nil {
+ if err := validateGrantee(v.Grantee); err != nil {
+ invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrantee(v *types.Grantee) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grantee"}
+ if len(v.Type) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Type"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrants(v []types.Grant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grants"}
+ for i := range v {
+ if err := validateGrant(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIndexDocument(v *types.IndexDocument) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"}
+ if v.Suffix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Suffix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateIntelligentTieringFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.Tierings == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tierings"))
+ } else if v.Tierings != nil {
+ if err := validateTieringList(v.Tierings); err != nil {
+ invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"}
+ if v.Tag != nil {
+ if err := validateTag(v.Tag); err != nil {
+ invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.And != nil {
+ if err := validateIntelligentTieringAndOperator(v.And); err != nil {
+ invalidParams.AddNested("And", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryConfiguration(v *types.InventoryConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"}
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateInventoryDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Filter != nil {
+ if err := validateInventoryFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if len(v.IncludedObjectVersions) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions"))
+ }
+ if v.Schedule == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Schedule"))
+ } else if v.Schedule != nil {
+ if err := validateInventorySchedule(v.Schedule); err != nil {
+ invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryDestination(v *types.InventoryDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"}
+ if v.S3BucketDestination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+ } else if v.S3BucketDestination != nil {
+ if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryEncryption(v *types.InventoryEncryption) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"}
+ if v.SSEKMS != nil {
+ if err := validateSSEKMS(v.SSEKMS); err != nil {
+ invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryFilter(v *types.InventoryFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"}
+ if v.Prefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Prefix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if len(v.Format) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Format"))
+ }
+ if v.Encryption != nil {
+ if err := validateInventoryEncryption(v.Encryption); err != nil {
+ invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventorySchedule(v *types.InventorySchedule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"}
+ if len(v.Frequency) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Frequency"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"}
+ if v.LambdaFunctionArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"}
+ for i := range v {
+ if err := validateLambdaFunctionConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRule(v *types.LifecycleRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"}
+ if v.Filter != nil {
+ if err := validateLifecycleRuleFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+// validateLifecycleRuleFilter dispatches on the union's concrete member type;
+// unknown/unhandled members fall through the switch as valid.
+func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"}
+ switch uv := v.(type) {
+ case *types.LifecycleRuleFilterMemberAnd:
+ if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.LifecycleRuleFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRules(v []types.LifecycleRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"}
+ for i := range v {
+ if err := validateLifecycleRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLoggingEnabled(v *types.LoggingEnabled) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"}
+ if v.TargetBucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetBucket"))
+ }
+ if v.TargetGrants != nil {
+ if err := validateTargetGrants(v.TargetGrants); err != nil {
+ invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.TargetPrefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetrics(v *types.Metrics) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Metrics"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsAndOperator(v *types.MetricsAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsConfiguration(v *types.MetricsConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateMetricsFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsFilter(v types.MetricsFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"}
+ switch uv := v.(type) {
+ case *types.MetricsFilterMemberAnd:
+ if err := validateMetricsAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.MetricsFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateNotificationConfiguration(v *types.NotificationConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"}
+ if v.TopicConfigurations != nil {
+ if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil {
+ invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.QueueConfigurations != nil {
+ if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil {
+ invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.LambdaFunctionConfigurations != nil {
+ if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil {
+ invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateObjectIdentifier(v *types.ObjectIdentifier) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateObjectIdentifierList(v []types.ObjectIdentifier) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"}
+ for i := range v {
+ if err := validateObjectIdentifier(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOutputLocation(v *types.OutputLocation) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"}
+ if v.S3 != nil {
+ if err := validateS3Location(v.S3); err != nil {
+ invalidParams.AddNested("S3", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControls(v *types.OwnershipControls) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateOwnershipControlsRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"}
+ if len(v.ObjectOwnership) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"}
+ for i := range v {
+ if err := validateOwnershipControlsRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateQueueConfiguration(v *types.QueueConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"}
+ if v.QueueArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("QueueArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateQueueConfigurationList(v []types.QueueConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"}
+ for i := range v {
+ if err := validateQueueConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"}
+ if v.HostName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("HostName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaModifications(v *types.ReplicaModifications) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationConfiguration(v *types.ReplicationConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"}
+ if v.Role == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Role"))
+ }
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateReplicationRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRule(v *types.ReplicationRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"}
+ if v.Filter != nil {
+ if err := validateReplicationRuleFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.SourceSelectionCriteria != nil {
+ if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil {
+ invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ExistingObjectReplication != nil {
+ if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil {
+ invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"}
+ switch uv := v.(type) {
+ case *types.ReplicationRuleFilterMemberAnd:
+ if err := validateReplicationRuleAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.ReplicationRuleFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRules(v []types.ReplicationRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"}
+ for i := range v {
+ if err := validateReplicationRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationTime(v *types.ReplicationTime) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.Time == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Time"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"}
+ if len(v.Payer) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Payer"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRestoreRequest(v *types.RestoreRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"}
+ if v.GlacierJobParameters != nil {
+ if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.SelectParameters != nil {
+ if err := validateSelectParameters(v.SelectParameters); err != nil {
+ invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.OutputLocation != nil {
+ if err := validateOutputLocation(v.OutputLocation); err != nil {
+ invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRoutingRule(v *types.RoutingRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"}
+ if v.Redirect == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Redirect"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRoutingRules(v []types.RoutingRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"}
+ for i := range v {
+ if err := validateRoutingRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateS3Location(v *types.S3Location) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "S3Location"}
+ if v.BucketName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BucketName"))
+ }
+ if v.Prefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Prefix"))
+ }
+ if v.Encryption != nil {
+ if err := validateEncryption(v.Encryption); err != nil {
+ invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.AccessControlList != nil {
+ if err := validateGrants(v.AccessControlList); err != nil {
+ invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSelectParameters(v *types.SelectParameters) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"}
+ if v.InputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("InputSerialization"))
+ }
+ if len(v.ExpressionType) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ExpressionType"))
+ }
+ if v.Expression == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Expression"))
+ }
+ if v.OutputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"}
+ if len(v.SSEAlgorithm) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateServerSideEncryptionRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"}
+ if v.ApplyServerSideEncryptionByDefault != nil {
+ if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil {
+ invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"}
+ for i := range v {
+ if err := validateServerSideEncryptionRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"}
+ if v.SseKmsEncryptedObjects != nil {
+ if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil {
+ invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaModifications != nil {
+ if err := validateReplicaModifications(v.ReplicaModifications); err != nil {
+ invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSSEKMS(v *types.SSEKMS) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"}
+ if v.KeyId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("KeyId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"}
+ if v.DataExport != nil {
+ if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil {
+ invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"}
+ if len(v.OutputSchemaVersion) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion"))
+ }
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateAnalyticsExportDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTag(v *types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Value == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Value"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagging(v *types.Tagging) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tagging"}
+ if v.TagSet == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TagSet"))
+ } else if v.TagSet != nil {
+ if err := validateTagSet(v.TagSet); err != nil {
+ invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagSet(v []types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagSet"}
+ for i := range v {
+ if err := validateTag(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTargetGrant(v *types.TargetGrant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"}
+ if v.Grantee != nil {
+ if err := validateGrantee(v.Grantee); err != nil {
+ invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTargetGrants(v []types.TargetGrant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"}
+ for i := range v {
+ if err := validateTargetGrant(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTiering(v *types.Tiering) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tiering"}
+ if len(v.AccessTier) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessTier"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTieringList(v []types.Tiering) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TieringList"}
+ for i := range v {
+ if err := validateTiering(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTopicConfiguration(v *types.TopicConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"}
+ if v.TopicArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TopicArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTopicConfigurationList(v []types.TopicConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"}
+ for i := range v {
+ if err := validateTopicConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"}
+ if v.ErrorDocument != nil {
+ if err := validateErrorDocument(v.ErrorDocument); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.IndexDocument != nil {
+ if err := validateIndexDocument(v.IndexDocument); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.RedirectAllRequestsTo != nil {
+ if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.RoutingRules != nil {
+ if err := validateRoutingRules(v.RoutingRules); err != nil {
+ invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCopyObjectInput(v *CopyObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CopySource == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CopySource"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateBucketInput(v *CreateBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketInput(v *DeleteBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectInput(v *DeleteObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Delete == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Delete"))
+ } else if v.Delete != nil {
+ if err := validateDelete(v.Delete); err != nil {
+ invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAclInput(v *GetBucketAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectAclInput(v *GetObjectAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectInput(v *GetObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpHeadBucketInput(v *HeadBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpHeadObjectInput(v *HeadObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectsInput(v *ListObjectsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectsV2Input(v *ListObjectsV2Input) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListPartsInput(v *ListPartsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.AccelerateConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAclInput(v *PutBucketAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"}
+ if v.AccessControlPolicy != nil {
+ if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.AnalyticsConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration"))
+ } else if v.AnalyticsConfiguration != nil {
+ if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil {
+ invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CORSConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration"))
+ } else if v.CORSConfiguration != nil {
+ if err := validateCORSConfiguration(v.CORSConfiguration); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.ServerSideEncryptionConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration"))
+ } else if v.ServerSideEncryptionConfiguration != nil {
+ if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil {
+ invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.IntelligentTieringConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration"))
+ } else if v.IntelligentTieringConfiguration != nil {
+ if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil {
+ invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.InventoryConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration"))
+ } else if v.InventoryConfiguration != nil {
+ if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil {
+ invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.LifecycleConfiguration != nil {
+ if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.BucketLoggingStatus == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus"))
+ } else if v.BucketLoggingStatus != nil {
+ if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.MetricsConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration"))
+ } else if v.MetricsConfiguration != nil {
+ if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil {
+ invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.NotificationConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration"))
+ } else if v.NotificationConfiguration != nil {
+ if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil {
+ invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.OwnershipControls == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls"))
+ } else if v.OwnershipControls != nil {
+ if err := validateOwnershipControls(v.OwnershipControls); err != nil {
+ invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Policy == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Policy"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.ReplicationConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration"))
+ } else if v.ReplicationConfiguration != nil {
+ if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.RequestPaymentConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration"))
+ } else if v.RequestPaymentConfiguration != nil {
+ if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil {
+ invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Tagging == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tagging"))
+ } else if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.VersioningConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.WebsiteConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration"))
+ } else if v.WebsiteConfiguration != nil {
+ if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectAclInput(v *PutObjectAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"}
+ if v.AccessControlPolicy != nil {
+ if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectInput(v *PutObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Tagging == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tagging"))
+ } else if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.PublicAccessBlockConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpRestoreObjectInput(v *RestoreObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.RestoreRequest != nil {
+ if err := validateRestoreRequest(v.RestoreRequest); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CopySource == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CopySource"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUploadPartInput(v *UploadPartInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
new file mode 100644
index 000000000..b435c591c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -0,0 +1,258 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "time"
+)
+
+const ServiceID = "SSO"
+const ServiceAPIVersion = "2019-06-10"
+
+// Client provides the API client to make operations call for AWS Single Sign-On.
+type Client struct {
+ options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveDefaultEndpointConfiguration(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ EndpointResolver EndpointResolver
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ return to
+}
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+ ctx = middleware.ClearStackValues(ctx)
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, metadata, err = handler.Handle(ctx, params)
+ if err != nil {
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+ return result, metadata, err
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+ if o.HTTPClient != nil {
+ return
+ }
+ o.HTTPClient = awshttp.NewBuildableClient()
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+ o.Retryer = retry.NewStandard()
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver())
+}
+
+func addClientUserAgent(stack *middleware.Stack) error {
+ return awsmiddleware.AddRequestUserAgentMiddleware(stack)
+}
+
+func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
+ mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: o.Credentials,
+ Signer: o.HTTPSignerV4,
+ LogSigning: o.ClientLogMode.IsSigning(),
+ })
+ return stack.Finalize.Add(mw, middleware.After)
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
+ mo := retry.AddRetryMiddlewaresOptions{
+ Retryer: o.Retryer,
+ LogRetryAttempts: o.ClientLogMode.IsRetries(),
+ }
+ return retry.AddRetryMiddlewares(stack, mo)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return awshttp.AddResponseErrorMiddleware(stack)
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
new file mode 100644
index 000000000..3190c7dcc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
@@ -0,0 +1,123 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the STS short-term credentials for a given role name that is assigned to
+// the user.
+func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) {
+ if params == nil {
+ params = &GetRoleCredentialsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, addOperationGetRoleCredentialsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetRoleCredentialsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetRoleCredentialsInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see
+ // CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // This member is required.
+ AccessToken *string
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // This member is required.
+ AccountId *string
+
+ // The friendly name of the role that is assigned to the user.
+ //
+ // This member is required.
+ RoleName *string
+}
+
+type GetRoleCredentialsOutput struct {
+
+ // The credentials for the role that is assigned to the user.
+ RoleCredentials *types.RoleCredentials
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetRoleCredentials",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
new file mode 100644
index 000000000..aa25e3c6a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
@@ -0,0 +1,215 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all roles that are assigned to the user for a given AWS account.
+func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+ if params == nil {
+ params = &ListAccountRolesInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, addOperationListAccountRolesMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListAccountRolesOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListAccountRolesInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see
+ // CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // This member is required.
+ AccessToken *string
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // This member is required.
+ AccountId *string
+
+ // The number of items that clients can request per page.
+ MaxResults *int32
+
+ // The page token from the previous response output when you request subsequent
+ // pages.
+ NextToken *string
+}
+
+type ListAccountRolesOutput struct {
+
+ // The page token client that is used to retrieve the list of accounts.
+ NextToken *string
+
+ // A paginated response with the list of roles and the next token if more results
+ // are available.
+ RoleList []types.RoleInfo
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListAccountRolesValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListAccountRolesAPIClient is a client that implements the ListAccountRoles
+// operation.
+type ListAccountRolesAPIClient interface {
+ ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error)
+}
+
+var _ ListAccountRolesAPIClient = (*Client)(nil)
+
+// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles
+type ListAccountRolesPaginatorOptions struct {
+ // The number of items that clients can request per page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListAccountRolesPaginator is a paginator for ListAccountRoles
+type ListAccountRolesPaginator struct {
+ options ListAccountRolesPaginatorOptions
+ client ListAccountRolesAPIClient
+ params *ListAccountRolesInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator
+func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator {
+ if params == nil {
+ params = &ListAccountRolesInput{}
+ }
+
+ options := ListAccountRolesPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListAccountRolesPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountRolesPaginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next ListAccountRoles page.
+func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListAccountRoles",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
new file mode 100644
index 000000000..b802c4c27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
@@ -0,0 +1,212 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by
+// the administrator of the account. For more information, see Assign User Access
+// (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
+// in the AWS SSO User Guide. This operation returns a paginated response.
+func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+ if params == nil {
+ params = &ListAccountsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, addOperationListAccountsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListAccountsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListAccountsInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see
+ // CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // This member is required.
+ AccessToken *string
+
+ // This is the number of items clients can request per page.
+ MaxResults *int32
+
+ // (Optional) When requesting subsequent pages, this is the page token from the
+ // previous response output.
+ NextToken *string
+}
+
+type ListAccountsOutput struct {
+
+ // A paginated response with the list of account information and the next token if
+ // more results are available.
+ AccountList []types.AccountInfo
+
+ // The page token client that is used to retrieve the list of accounts.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListAccountsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListAccountsAPIClient is a client that implements the ListAccounts operation.
+type ListAccountsAPIClient interface {
+ ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error)
+}
+
+var _ ListAccountsAPIClient = (*Client)(nil)
+
+// ListAccountsPaginatorOptions is the paginator options for ListAccounts
+type ListAccountsPaginatorOptions struct {
+ // This is the number of items clients can request per page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListAccountsPaginator is a paginator for ListAccounts
+type ListAccountsPaginator struct {
+ options ListAccountsPaginatorOptions
+ client ListAccountsAPIClient
+ params *ListAccountsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListAccountsPaginator returns a new ListAccountsPaginator
+func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator {
+ if params == nil {
+ params = &ListAccountsInput{}
+ }
+
+ options := ListAccountsPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListAccountsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountsPaginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next ListAccounts page.
+func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ result, err := p.client.ListAccounts(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListAccounts",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
new file mode 100644
index 000000000..2a4f90a4b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
@@ -0,0 +1,107 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the client- and server-side session that is associated with the user.
+func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) {
+ if params == nil {
+ params = &LogoutInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, addOperationLogoutMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*LogoutOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type LogoutInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see
+ // CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // This member is required.
+ AccessToken *string
+}
+
+type LogoutOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpLogoutValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "Logout",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
new file mode 100644
index 000000000..6a1851da2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
@@ -0,0 +1,1151 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ smithy "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+type awsRestjson1_deserializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata)
+ }
+ output := &GetRoleCredentialsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ code := response.Header.Get("X-Amzn-ErrorType")
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ code, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *GetRoleCredentialsOutput
+ if *v == nil {
+ sv = &GetRoleCredentialsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "roleCredentials":
+ if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccountRoles) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata)
+ }
+ output := &ListAccountRolesOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ code := response.Header.Get("X-Amzn-ErrorType")
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ code, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListAccountRolesOutput
+ if *v == nil {
+ sv = &ListAccountRolesOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "nextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ case "roleList":
+ if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccounts) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata)
+ }
+ output := &ListAccountsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ code := response.Header.Get("X-Amzn-ErrorType")
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ code, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListAccountsOutput
+ if *v == nil {
+ sv = &ListAccountsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountList":
+ if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil {
+ return err
+ }
+
+ case "nextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpLogout struct {
+}
+
+func (*awsRestjson1_deserializeOpLogout) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata)
+ }
+ output := &LogoutOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ code := response.Header.Get("X-Amzn-ErrorType")
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ code, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(code) != 0 {
+ errorCode = restjson.SanitizeErrorCode(code)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequestException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ResourceNotFoundException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.TooManyRequestsException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.UnauthorizedException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AccountInfo
+ if *v == nil {
+ sv = &types.AccountInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+ }
+ sv.AccountId = ptr.String(jtv)
+ }
+
+ case "accountName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value)
+ }
+ sv.AccountName = ptr.String(jtv)
+ }
+
+ case "emailAddress":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value)
+ }
+ sv.EmailAddress = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.AccountInfo
+ if *v == nil {
+ cv = []types.AccountInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.AccountInfo
+ destAddr := &col
+ if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRequestException
+ if *v == nil {
+ sv = &types.InvalidRequestException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ResourceNotFoundException
+ if *v == nil {
+ sv = &types.ResourceNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RoleCredentials
+ if *v == nil {
+ sv = &types.RoleCredentials{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accessKeyId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value)
+ }
+ sv.AccessKeyId = ptr.String(jtv)
+ }
+
+ case "expiration":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.Expiration = i64
+ }
+
+ case "secretAccessKey":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value)
+ }
+ sv.SecretAccessKey = ptr.String(jtv)
+ }
+
+ case "sessionToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value)
+ }
+ sv.SessionToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RoleInfo
+ if *v == nil {
+ sv = &types.RoleInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+ }
+ sv.AccountId = ptr.String(jtv)
+ }
+
+ case "roleName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value)
+ }
+ sv.RoleName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.RoleInfo
+ if *v == nil {
+ cv = []types.RoleInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.RoleInfo
+ destAddr := &col
+ if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TooManyRequestsException
+ if *v == nil {
+ sv = &types.TooManyRequestsException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.UnauthorizedException
+ if *v == nil {
+ sv = &types.UnauthorizedException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
new file mode 100644
index 000000000..c5d03d8e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -0,0 +1,20 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sso provides the API client, operations, and parameter types for AWS
+// Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to assign
+// user access to AWS SSO resources such as the user portal. Users can get AWS
+// account applications and roles assigned to them and get federated into the
+// application. For general information about AWS SSO, see What is AWS Single
+// Sign-On?
+// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the
+// AWS SSO User Guide. This API reference guide describes the AWS SSO Portal
+// operations that you can call programatically and includes detailed information
+// on data types and errors. AWS provides SDKs that consist of libraries and sample
+// code for various programming languages and platforms, such as Java, Ruby, .Net,
+// iOS, or Android. The SDKs provide a convenient way to create programmatic access
+// to AWS SSO and other AWS services. For more information about the AWS SDKs,
+// including how to download and install them, see Tools for Amazon Web Services
+// (http://aws.amazon.com/tools/).
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
new file mode 100644
index 000000000..761a3a792
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -0,0 +1,160 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/url"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+ if o.EndpointResolver != nil {
+ return
+ }
+ o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom.You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), m.Options)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "awsssoportal"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolver
+ resolver EndpointResolver
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ if w.awsResolver == nil {
+ goto fallback
+ }
+ endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region)
+ if err == nil {
+ return endpoint, nil
+ }
+
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
+ return endpoint, err
+ }
+
+fallback:
+ if w.resolver == nil {
+ return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
+ }
+ return w.resolver.ResolveEndpoint(region, options)
+}
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint
+// resolution to the awsResolver. If awsResolver returns aws.EndpointNotFoundError
+// error, the resolver will use the the provided fallbackResolver for resolution.
+// awsResolver and fallbackResolver must not be nil
+func withEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver {
+ return &wrappedEndpointResolver{
+ awsResolver: awsResolver,
+ resolver: fallbackResolver,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.mod
new file mode 100644
index 000000000..a53df70f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.mod
@@ -0,0 +1,10 @@
+module github.com/aws/aws-sdk-go-v2/service/sso
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/smithy-go v1.2.0
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..697414b88
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -0,0 +1,147 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ DisableHTTPS bool
+}
+
+// Resolver SSO endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := endpoints.Options{
+ DisableHTTPS: options.DisableHTTPS,
+ }
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: endpoints.Endpoint{
+ Hostname: "portal.sso.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "ap-southeast-1": endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoints.Endpoint{
+ Hostname: "portal.sso.ca-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoints.Endpoint{
+ Hostname: "portal.sso.eu-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoints.Endpoint{
+ Hostname: "portal.sso.eu-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoints.Endpoint{
+ Hostname: "portal.sso.eu-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoints.Endpoint{
+ Hostname: "portal.sso.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoints.Endpoint{
+ Hostname: "portal.sso.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoints.Endpoint{
+ Hostname: "portal.sso.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: endpoints.Endpoint{
+ Hostname: "portal.sso.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso",
+ Defaults: endpoints.Endpoint{
+ Hostname: "portal.sso.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: endpoints.Endpoint{
+ Hostname: "portal.sso.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: endpoints.Endpoint{
+ Hostname: "portal.sso.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
new file mode 100644
index 000000000..ea20305b2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
@@ -0,0 +1,276 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsRestjson1_serializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetRoleCredentialsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/federation/credentials")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.AccountId != nil {
+ encoder.SetQuery("account_id").String(*v.AccountId)
+ }
+
+ if v.RoleName != nil {
+ encoder.SetQuery("role_name").String(*v.RoleName)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_serializeOpListAccountRoles) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListAccountRolesInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/assignment/roles")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.AccountId != nil {
+ encoder.SetQuery("account_id").String(*v.AccountId)
+ }
+
+ if v.MaxResults != nil {
+ encoder.SetQuery("max_result").Integer(*v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ encoder.SetQuery("next_token").String(*v.NextToken)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_serializeOpListAccounts) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListAccountsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/assignment/accounts")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "GET"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.MaxResults != nil {
+ encoder.SetQuery("max_result").Integer(*v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ encoder.SetQuery("next_token").String(*v.NextToken)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpLogout struct {
+}
+
+func (*awsRestjson1_serializeOpLogout) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*LogoutInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/logout")
+ request.URL.Path = opPath
+ if len(request.URL.RawQuery) > 0 {
+ request.URL.RawQuery = "&" + opQuery
+ } else {
+ request.URL.RawQuery = opQuery
+ }
+
+ request.Method = "POST"
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
new file mode 100644
index 000000000..26b8d1b42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
@@ -0,0 +1,79 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// Indicates that a problem occurred with the input to the request. For example, a
+// required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ Message *string
+}
+
+func (e *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequestException) ErrorCode() string { return "InvalidRequestException" }
+func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+ Message *string
+}
+
+func (e *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ResourceNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ResourceNotFoundException) ErrorCode() string { return "ResourceNotFoundException" }
+func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is being made too frequently and is more than what
+// the server can handle.
+type TooManyRequestsException struct {
+ Message *string
+}
+
+func (e *TooManyRequestsException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TooManyRequestsException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TooManyRequestsException) ErrorCode() string { return "TooManyRequestsException" }
+func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+ Message *string
+}
+
+func (e *UnauthorizedException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnauthorizedException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *UnauthorizedException) ErrorCode() string { return "UnauthorizedException" }
+func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
new file mode 100644
index 000000000..8d43acf42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
@@ -0,0 +1,52 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+// Provides information about your AWS account.
+type AccountInfo struct {
+
+ // The identifier of the AWS account that is assigned to the user.
+ AccountId *string
+
+ // The display name of the AWS account that is assigned to the user.
+ AccountName *string
+
+ // The email address of the AWS account that is assigned to the user.
+ EmailAddress *string
+}
+
+// Provides information about the role credentials that are assigned to the user.
+type RoleCredentials struct {
+
+ // The identifier used for the temporary security credentials. For more
+ // information, see Using Temporary Security Credentials to Request Access to AWS
+ // Resources
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ AccessKeyId *string
+
+ // The date on which temporary security credentials expire.
+ Expiration int64
+
+ // The key that is used to sign the request. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SecretAccessKey *string
+
+ // The token used for temporary credentials. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SessionToken *string
+}
+
+// Provides information about the role that is assigned to the user.
+type RoleInfo struct {
+
+ // The identifier of the AWS account assigned to the user.
+ AccountId *string
+
+ // The friendly name of the role that is assigned to the user.
+ RoleName *string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
new file mode 100644
index 000000000..f6bf461f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
@@ -0,0 +1,175 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpGetRoleCredentials struct {
+}
+
+func (*validateOpGetRoleCredentials) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetRoleCredentialsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetRoleCredentialsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccountRoles struct {
+}
+
+func (*validateOpListAccountRoles) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListAccountRolesInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListAccountRolesInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccounts struct {
+}
+
+func (*validateOpListAccounts) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListAccountsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListAccountsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpLogout struct {
+}
+
+func (*validateOpLogout) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*LogoutInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpLogoutInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After)
+}
+
+func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After)
+}
+
+func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After)
+}
+
+func addOpLogoutValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpLogout{}, middleware.After)
+}
+
+func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"}
+ if v.RoleName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleName"))
+ }
+ if v.AccountId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+ }
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListAccountRolesInput(v *ListAccountRolesInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if v.AccountId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListAccountsInput(v *ListAccountsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpLogoutInput(v *LogoutInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
new file mode 100644
index 000000000..0d67082f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -0,0 +1,358 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "time"
+)
+
+const ServiceID = "STS"
+const ServiceAPIVersion = "2011-06-15"
+
+// Client provides the API client to make operations call for AWS Security Token
+// Service.
+type Client struct {
+ options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveDefaultEndpointConfiguration(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ EndpointResolver EndpointResolver
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ return to
+}
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+ ctx = middleware.ClearStackValues(ctx)
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, metadata, err = handler.Handle(ctx, params)
+ if err != nil {
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+ return result, metadata, err
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+ if o.HTTPClient != nil {
+ return
+ }
+ o.HTTPClient = awshttp.NewBuildableClient()
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+ o.Retryer = retry.NewStandard()
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver())
+}
+
+func addClientUserAgent(stack *middleware.Stack) error {
+ return awsmiddleware.AddRequestUserAgentMiddleware(stack)
+}
+
+func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
+ mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: o.Credentials,
+ Signer: o.HTTPSignerV4,
+ LogSigning: o.ClientLogMode.IsSigning(),
+ })
+ return stack.Finalize.Add(mw, middleware.After)
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
+ mo := retry.AddRetryMiddlewaresOptions{
+ Retryer: o.Retryer,
+ LogRetryAttempts: o.ClientLogMode.IsRetries(),
+ }
+ return retry.AddRetryMiddlewares(stack, mo)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return awshttp.AddResponseErrorMiddleware(stack)
+}
+
+// HTTPPresignerV4 represents presigner interface used by presign url client
+type HTTPPresignerV4 interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*v4.SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+ // ClientOptions are list of functional options to mutate client options used by
+ // the presign client.
+ ClientOptions []func(*Options)
+
+ // Presigner is the presigner used by the presign url client
+ Presigner HTTPPresignerV4
+}
+
+func (o PresignOptions) copy() PresignOptions {
+ clientOptions := make([]func(*Options), len(o.ClientOptions))
+ copy(clientOptions, o.ClientOptions)
+ o.ClientOptions = clientOptions
+ return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOption as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+ return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+ o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// PresignClient represents the presign url client
+type PresignClient struct {
+ client *Client
+ options PresignOptions
+}
+
+// NewPresignClient generates a presign client using provided API Client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+ var options PresignOptions
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ if len(options.ClientOptions) != 0 {
+ c = New(c.options, options.ClientOptions...)
+ }
+
+ if options.Presigner == nil {
+ options.Presigner = newDefaultV4Signer(c.options)
+ }
+
+ return &PresignClient{
+ client: c,
+ options: options,
+ }
+}
+
+func withNopHTTPClientAPIOption(o *Options) {
+ o.HTTPClient = smithyhttp.NopClient{}
+}
+
+type presignConverter PresignOptions
+
+func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+ stack.Finalize.Clear()
+ stack.Deserialize.Clear()
+ stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
+ pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ Presigner: c.Presigner,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ })
+ err = stack.Finalize.Add(pmw, middleware.After)
+ if err != nil {
+ return err
+ }
+ // convert request to a GET request
+ err = query.AddAsGetRequestMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ err = presignedurlcust.AddAsIsPresigingMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
new file mode 100644
index 000000000..a37c514bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -0,0 +1,387 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials that you can use to access AWS
+// resources that you might not normally have access to. These temporary
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use AssumeRole within your account or for cross-account
+// access. For a comparison of AssumeRole with other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. You cannot use AWS account root user credentials to call
+// AssumeRole. You must use credentials for an IAM user or an IAM role to call
+// AssumeRole. For cross-account access, imagine that you own multiple accounts and
+// need to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those
+// credentials and remembering which one can access which account can be time
+// consuming. Instead, you can create one set of long-term credentials in one
+// account. Then use temporary security credentials to access all the other
+// accounts by assuming roles in those accounts. For more information about roles,
+// see IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
+// in the IAM User Guide. Session Duration By default, the temporary security
+// credentials created by AssumeRole last for one hour. However, you can use the
+// optional DurationSeconds parameter to specify the duration of your session. You
+// can provide a value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour to 12
+// hours. To learn how to view the maximum value for your role, see View the
+// Maximum Session Duration Setting for a Role
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you use
+// the AssumeRole* API operations or the assume-role* CLI commands. However the
+// limit does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM
+// User Guide. Permissions The temporary security credentials created by AssumeRole
+// can be used to make API calls to any AWS service with the following exception:
+// You cannot call the AWS STS GetFederationToken or GetSessionToken API
+// operations. (Optional) You can pass inline or managed session policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to use as
+// managed session policies. The plain text that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Passing policies to this
+// operation returns new temporary credentials. The resulting session's permissions
+// are the intersection of the role's identity-based policy and the session
+// policies. You can use the role's temporary credentials in subsequent AWS API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see Session Policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. To assume a role from a different account, your AWS
+// account must be trusted by the role. The trust relationship is defined in the
+// role's trust policy when the role is created. That trust policy states which
+// accounts are allowed to delegate that access to users in the account. A user who
+// wants to access a role in a different account must also have permissions that
+// are delegated from the user account administrator. The administrator must attach
+// a policy that allows the user to call AssumeRole for the ARN of the role in the
+// other account. If the user is in the same account as the role, then you can do
+// either of the following:
+//
+// * Attach a policy to the user (identical to the
+// previous user in a different account).
+//
+// * Add the user as a principal directly
+// in the role's trust policy.
+//
+// In this case, the trust policy acts as an IAM
+// resource-based policy. Users in the same account as the role do not need
+// explicit permission to assume the role. For more information about trust
+// policies and resource-based policies, see IAM Policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the
+// IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your
+// session. These tags are called session tags. For more information about session
+// tags, see Passing Session Tags in STS
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide. An administrator must grant you the permissions necessary to
+// pass session tags. The administrator can also create granular permissions to
+// allow you to pass only specific session tags. For more information, see
+// Tutorial: Using Tags for Attribute-Based Access Control
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide. You can set the session tags as transitive. Transitive
+// tags persist during role chaining. For more information, see Chaining Roles with
+// Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include
+// multi-factor authentication (MFA) information when you call AssumeRole. This is
+// useful for cross-account scenarios to ensure that the user that assumes the role
+// has been authenticated with an AWS MFA device. In that scenario, the trust
+// policy of the role being assumed includes a condition that tests for MFA
+// authentication. If the caller does not include valid MFA information, the
+// request to assume the role is denied. The condition in a trust policy that tests
+// for MFA authentication might look like the following example. "Condition":
+// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see
+// Configuring MFA-Protected API Access
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the
+// IAM User Guide guide. To use MFA with AssumeRole, you pass values for the
+// SerialNumber and TokenCode parameters. The SerialNumber value identifies the
+// user's hardware or virtual MFA device. The TokenCode is the time-based one-time
+// password (TOTP) that the MFA device produces.
+func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
+ // Treat a nil params as an empty input so invokeOperation always receives
+ // a non-nil value.
+ if params == nil {
+ params = &AssumeRoleInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, addOperationAssumeRoleMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ // invokeOperation returns the deserialized output; attach the operation
+ // metadata before handing it back to the caller.
+ out := result.(*AssumeRoleOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// AssumeRoleInput contains the request parameters for the AssumeRole
+// operation.
+type AssumeRoleInput struct {
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // This member is required.
+ RoleArn *string
+
+ // An identifier for the assumed role session. Use the role session name to
+ // uniquely identify a session when the same role is assumed by different
+ // principals or for different reasons. In cross-account scenarios, the role
+ // session name is visible to, and can be logged by the account that owns the role.
+ // The role session name is also used in the ARN of the assumed role principal.
+ // This means that subsequent cross-account API requests that use the temporary
+ // security credentials will expose the role session name to the external account
+ // in their AWS CloudTrail logs. The regex used to validate this parameter is a
+ // string of characters consisting of upper- and lower-case alphanumeric characters
+ // with no spaces. You can also include underscores or any of the following
+ // characters: =,.@-
+ //
+ // This member is required.
+ RoleSessionName *string
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify a
+ // session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a Role
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide. By default, the value is set to 3600 seconds. The
+ // DurationSeconds parameter is separate from the duration of a console session
+ // that you might request using the returned credentials. The request to the
+ // federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the AWS
+ // Management Console
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int32
+
+ // A unique identifier that might be required when you assume a role in another
+ // account. If the administrator of the account to which the role belongs provided
+ // you with an external ID, then provide that value in the ExternalId parameter.
+ // This value can be any string, such as a passphrase or account number. A
+ // cross-account role is usually set up to trust everyone in an account. Therefore,
+ // the administrator of the trusting account might send an external ID to the
+ // administrator of the trusted account. That way, only someone with the ID can
+ // assume the role, rather than everyone in the account. For more information about
+ // the external ID, see How to Use an External ID When Granting Access to Your AWS
+ // Resources to a Third Party
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide. The regex used to validate this parameter is a string of
+ // characters consisting of upper- and lower-case alphanumeric characters with no
+ // spaces. You can also include underscores or any of the following characters:
+ // =,.@:/-
+ ExternalId *string
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent AWS API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide. The plain text that you use for both inline and managed
+ // session policies can't exceed 2,048 characters. The JSON policy characters can
+ // be any ASCII character from the space character to the end of the valid
+ // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
+ // linefeed (\u000A), and carriage return (\u000D) characters. An AWS conversion
+ // compresses the passed session policies and session tags into a packed binary
+ // format that has a separate limit. Your request can fail for this limit even if
+ // your plain text meets the other requirements. The PackedPolicySize response
+ // element indicates by percentage how close the policies and tags for your request
+ // are to the upper size limit.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role. This parameter is optional. You can provide up to 10 managed policy
+ // ARNs. However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. For more information about ARNs, see
+ // Amazon Resource Names (ARNs) and AWS Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
+ // the AWS General Reference. An AWS conversion compresses the passed session
+ // policies and session tags into a packed binary format that has a separate limit.
+ // Your request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit.
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent AWS API calls to access resources in the account that
+ // owns the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []types.PolicyDescriptorType
+
+ // The identification number of the MFA device that is associated with the user who
+ // is making the AssumeRole call. Specify this value if the trust policy of the
+ // role being assumed includes a condition that requires MFA authentication. The
+ // value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as
+ // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter
+ // is a string of characters consisting of upper- and lower-case alphanumeric
+ // characters with no spaces. You can also include underscores or any of the
+ // following characters: =,.@-
+ SerialNumber *string
+
+ // A list of session tags that you want to pass. Each session tag consists of a key
+ // name and an associated value. For more information about session tags, see
+ // Tagging AWS STS Sessions
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+ // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+ // The plain text session tag keys can’t exceed 128 characters, and the values
+ // can’t exceed 256 characters. For these and additional limits, see IAM and STS
+ // Character Limits
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide. An AWS conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit. You
+ // can pass a session tag with the same key as a tag that is already attached to
+ // the role. When you do, session tags override a role tag with the same key. Tag
+ // key–value pairs are not case sensitive, but case is preserved. This means that
+ // you cannot have separate Department and department tag keys. Assume that the
+ // role has the Department=Marketing tag and you pass the department=engineering
+ // session tag. Department and department are not saved as separate tags, and the
+ // session tag passed in the request takes precedence over the role tag.
+ // Additionally, if you used temporary credentials to perform this operation, the
+ // new session inherits any transitive session tags from the calling session. If
+ // you pass a session tag with the same key as an inherited tag, the operation
+ // fails. To view the inherited tags for a session, see the AWS CloudTrail logs.
+ // For more information, see Viewing Session Tags in CloudTrail
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs)
+ // in the IAM User Guide.
+ Tags []types.Tag
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests for
+ // MFA). If the role being assumed requires MFA and if the TokenCode value is
+ // missing or expired, the AssumeRole call returns an "access denied" error. The
+ // format for this parameter, as described by its regex pattern, is a sequence of
+ // six numeric digits.
+ TokenCode *string
+
+ // A list of keys for session tags that you want to set as transitive. If you set a
+ // tag key as transitive, the corresponding key and value passes to subsequent
+ // sessions in a role chain. For more information, see Chaining Roles with Session
+ // Tags
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+ // in the IAM User Guide. This parameter is optional. When you set session tags as
+ // transitive, the session policy and session tags packed binary limit is not
+ // affected. If you choose not to specify a transitive tag key, then no tags are
+ // passed from this session to any subsequent sessions.
+ TransitiveTagKeys []string
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials. For
+ // example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the
+ // RoleSessionName that you specified when you called AssumeRole.
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token. The size of the security token
+ // that STS API operations return is not fixed. We strongly recommend that you make
+ // no assumptions about the maximum size.
+ // NOTE(review): this value contains a secret access key and session token —
+ // treat it as sensitive and avoid logging it.
+ Credentials *types.Credentials
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+// addOperationAssumeRoleMiddlewares assembles the middleware stack for the
+// AssumeRole operation: query serialization/deserialization, logging, request
+// IDs, content length, endpoint resolution, payload hashing, retries, SigV4
+// signing, response metadata/timing, user agent, response-body handling, input
+// validation, service metadata, request-ID retrieval, error mapping, and
+// request/response logging. Registration order follows the generated layout.
+func addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// newServiceMetadataMiddleware_opAssumeRole returns the middleware that
+// registers the region, service ID, signing name, and operation name used
+// when signing and dispatching AssumeRole requests.
+func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "AssumeRole",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
new file mode 100644
index 000000000..8e7690be9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -0,0 +1,333 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated via a SAML authentication response. This operation provides a
+// mechanism for tying an enterprise identity store or directory to role-based AWS
+// access without user-specific credentials or configuration. For a comparison of
+// AssumeRoleWithSAML with the other API operations that produce temporary
+// credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. The temporary security credentials returned by this
+// operation consist of an access key ID, a secret access key, and a security
+// token. Applications can use these temporary security credentials to sign calls
+// to AWS services. Session Duration By default, the temporary security credentials
+// created by AssumeRoleWithSAML last for one hour. However, you can use the
+// optional DurationSeconds parameter to specify the duration of your session. Your
+// role session lasts for the duration that you specify, or until the time
+// specified in the SAML authentication response's SessionNotOnOrAfter value,
+// whichever is shorter. You can provide a DurationSeconds value from 900 seconds
+// (15 minutes) up to the maximum session duration setting for the role. This
+// setting can have a value from 1 hour to 12 hours. To learn how to view the
+// maximum value for your role, see View the Maximum Session Duration Setting for a
+// Role
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you use
+// the AssumeRole* API operations or the assume-role* CLI commands. However the
+// limit does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM
+// User Guide. Permissions The temporary security credentials created by
+// AssumeRoleWithSAML can be used to make API calls to any AWS service with the
+// following exception: you cannot call the STS GetFederationToken or
+// GetSessionToken API operations. (Optional) You can pass inline or managed
+// session policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to use as
+// managed session policies. The plain text that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Passing policies to this
+// operation returns new temporary credentials. The resulting session's permissions
+// are the intersection of the role's identity-based policy and the session
+// policies. You can use the role's temporary credentials in subsequent AWS API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see Session Policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of
+// AWS security credentials. The identity of the caller is validated by using keys
+// in the metadata document that is uploaded for the SAML provider entity for your
+// identity provider. Calling AssumeRoleWithSAML can result in an entry in your AWS
+// CloudTrail logs. The entry includes the value in the NameID element of the SAML
+// assertion. We recommend that you use a NameIDType that is not associated with
+// any personally identifiable information (PII). For example, you could instead
+// use the persistent identifier
+// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can
+// configure your IdP to pass attributes into your SAML assertion as session tags.
+// Each session tag consists of a key name and an associated value. For more
+// information about session tags, see Passing Session Tags in STS
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide. You can pass up to 50 session tags. The plain text session tag
+// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For
+// these and additional limits, see IAM and STS Character Limits
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide. An AWS conversion compresses the passed session policies
+// and session tags into a packed binary format that has a separate limit. Your
+// request can fail for this limit even if your plain text meets the other
+// requirements. The PackedPolicySize response element indicates by percentage how
+// close the policies and tags for your request are to the upper size limit. You
+// can pass a session tag with the same key as a tag that is attached to the role.
+// When you do, session tags override the role's tags with the same key. An
+// administrator must grant you the permissions necessary to pass session tags. The
+// administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see Tutorial: Using Tags for
+// Attribute-Based Access Control
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide. You can set the session tags as transitive. Transitive
+// tags persist during role chaining. For more information, see Chaining Roles with
+// Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide. SAML Configuration Before your application can call
+// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to
+// issue the claims required by AWS. Additionally, you must use AWS Identity and
+// Access Management (IAM) to create a SAML provider entity in your AWS account
+// that represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy. For more information, see the
+// following resources:
+//
+// * About SAML 2.0-based Federation
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) {
+ if params == nil {
+ params = &AssumeRoleWithSAMLInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, addOperationAssumeRoleWithSAMLMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRoleWithSAMLOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AssumeRoleWithSAMLInput struct {
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the
+ // IdP.
+ //
+ // This member is required.
+ PrincipalArn *string
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // This member is required.
+ RoleArn *string
+
+ // The base-64 encoded SAML authentication response provided by the IdP. For more
+ // information, see Configuring a Relying Party and Adding Claims
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the IAM User Guide.
+ //
+ // This member is required.
+ SAMLAssertion *string
+
+ // The duration, in seconds, of the role session. Your role session lasts for the
+ // duration that you specify for the DurationSeconds parameter, or until the time
+ // specified in the SAML authentication response's SessionNotOnOrAfter value,
+ // whichever is shorter. You can provide a DurationSeconds value from 900 seconds
+ // (15 minutes) up to the maximum session duration setting for the role. This
+ // setting can have a value from 1 hour to 12 hours. If you specify a value higher
+ // than this setting, the operation fails. For example, if you specify a session
+ // duration of 12 hours, but your administrator set the maximum session duration to
+ // 6 hours, your operation fails. To learn how to view the maximum value for your
+ // role, see View the Maximum Session Duration Setting for a Role
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide. By default, the value is set to 3600 seconds. The
+ // DurationSeconds parameter is separate from the duration of a console session
+ // that you might request using the returned credentials. The request to the
+ // federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the AWS
+ // Management Console
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent AWS API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide. The plain text that you use for both inline and managed
+ // session policies can't exceed 2,048 characters. The JSON policy characters can
+ // be any ASCII character from the space character to the end of the valid
+ // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
+ // linefeed (\u000A), and carriage return (\u000D) characters. An AWS conversion
+ // compresses the passed session policies and session tags into a packed binary
+ // format that has a separate limit. Your request can fail for this limit even if
+ // your plain text meets the other requirements. The PackedPolicySize response
+ // element indicates by percentage how close the policies and tags for your request
+ // are to the upper size limit.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role. This parameter is optional. You can provide up to 10 managed policy
+ // ARNs. However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. For more information about ARNs, see
+ // Amazon Resource Names (ARNs) and AWS Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
+ // the AWS General Reference. An AWS conversion compresses the passed session
+ // policies and session tags into a packed binary format that has a separate limit.
+ // Your request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit.
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent AWS API calls to access resources in the account that
+ // owns the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []types.PolicyDescriptorType
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element of
+ // the SAML assertion.
+ Audience *string
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token. The size of the security token
+ // that STS API operations return is not fixed. We strongly recommend that you make
+ // no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string
+
+ // A hash value based on the concatenation of the Issuer response value, the AWS
+ // account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used to
+ // uniquely identify a federated user. The following pseudocode shows how the hash
+ // value is calculated: BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012"
+ // + "/MySAMLIdP" ) )
+ NameQualifier *string
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient or
+ // persistent. If the format includes the prefix
+ // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example,
+ // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If
+ // the format includes any other prefix, the format is returned with no
+ // modifications.
+ SubjectType *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "AssumeRoleWithSAML",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
new file mode 100644
index 000000000..aaaa8a041
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -0,0 +1,359 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated in a mobile or web application with a web identity provider.
+// Example providers include Amazon Cognito, Login with Amazon, Facebook, Google,
+// or any OpenID Connect-compatible identity provider. For mobile applications, we
+// recommend that you use Amazon Cognito. You can use Amazon Cognito with the AWS
+// SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and the AWS SDK
+// for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user. You can also supply the user with a consistent identity
+// throughout the lifetime of an application. To learn more about Amazon Cognito,
+// see Amazon Cognito Overview
+// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide. Calling AssumeRoleWithWebIdentity does
+// not require the use of AWS security credentials. Therefore, you can distribute
+// an application (for example, on mobile devices) that requests temporary security
+// credentials without including long-term AWS credentials in the application. You
+// also don't need to deploy server-based proxy services that use long-term AWS
+// credentials. Instead, the identity of the caller is validated by using a token
+// from the web identity provider. For a comparison of AssumeRoleWithWebIdentity
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. The temporary security credentials returned by this API
+// consist of an access key ID, a secret access key, and a security token.
+// Applications can use these temporary security credentials to sign calls to AWS
+// service API operations. Session Duration By default, the temporary security
+// credentials created by AssumeRoleWithWebIdentity last for one hour. However, you
+// can use the optional DurationSeconds parameter to specify the duration of your
+// session. You can provide a value from 900 seconds (15 minutes) up to the maximum
+// session duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View the
+// Maximum Session Duration Setting for a Role
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you use
+// the AssumeRole* API operations or the assume-role* CLI commands. However the
+// limit does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM
+// User Guide. Permissions The temporary security credentials created by
+// AssumeRoleWithWebIdentity can be used to make API calls to any AWS service with
+// the following exception: you cannot call the STS GetFederationToken or
+// GetSessionToken API operations. (Optional) You can pass inline or managed
+// session policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to use as
+// managed session policies. The plain text that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Passing policies to this
+// operation returns new temporary credentials. The resulting session's permissions
+// are the intersection of the role's identity-based policy and the session
+// policies. You can use the role's temporary credentials in subsequent AWS API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see Session Policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass
+// attributes into your web identity token as session tags. Each session tag
+// consists of a key name and an associated value. For more information about
+// session tags, see Passing Session Tags in STS
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide. You can pass up to 50 session tags. The plain text session tag
+// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For
+// these and additional limits, see IAM and STS Character Limits
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide. An AWS conversion compresses the passed session policies
+// and session tags into a packed binary format that has a separate limit. Your
+// request can fail for this limit even if your plain text meets the other
+// requirements. The PackedPolicySize response element indicates by percentage how
+// close the policies and tags for your request are to the upper size limit. You
+// can pass a session tag with the same key as a tag that is attached to the role.
+// When you do, the session tag overrides the role tag with the same key. An
+// administrator must grant you the permissions necessary to pass session tags. The
+// administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see Tutorial: Using Tags for
+// Attribute-Based Access Control
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide. You can set the session tags as transitive. Transitive
+// tags persist during role chaining. For more information, see Chaining Roles with
+// Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide. Identities Before your application can call
+// AssumeRoleWithWebIdentity, you must have an identity token from a supported
+// identity provider and create a role that the application can assume. The role
+// that your application assumes must trust the identity provider that is
+// associated with the identity token. In other words, the identity provider must
+// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can
+// result in an entry in your AWS CloudTrail logs. The entry includes the Subject
+// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided
+// Web Identity Token. We recommend that you avoid using any personally
+// identifiable information (PII) in this field. For example, you could instead use
+// a GUID or a pairwise identifier, as suggested in the OIDC specification
+// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more
+// information about how to use web identity federation and the
+// AssumeRoleWithWebIdentity API, see the following resources:
+//
+// * Using Web
+// Identity Federation API Operations for Mobile Apps
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// *
+// Web Identity Federation Playground
+// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
+// Walk through the process of authenticating through Login with Amazon, Facebook,
+// or Google, getting temporary security credentials, and then using those
+// credentials to make a request to AWS.
+//
+// * AWS SDK for iOS Developer Guide
+// (http://aws.amazon.com/sdkforios/) and AWS SDK for Android Developer Guide
+// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps that
+// show how to invoke the identity providers. The toolkits then show how to use the
+// information from these providers to get and use temporary security
+// credentials.
+//
+// * Web Identity Federation with Mobile Applications
+// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of how to
+// use web identity federation to get access to content in Amazon S3.
+func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) {
+ if params == nil {
+ params = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, addOperationAssumeRoleWithWebIdentityMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRoleWithWebIdentityOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // This member is required.
+ RoleArn *string
+
+ // An identifier for the assumed role session. Typically, you pass the name or
+ // identifier that is associated with the user who is using your application. That
+ // way, the temporary security credentials that your application will use are
+ // associated with that user. This session name is included as part of the ARN and
+ // assumed role ID in the AssumedRoleUser response element. The regex used to
+ // validate this parameter is a string of characters consisting of upper- and
+ // lower-case alphanumeric characters with no spaces. You can also include
+ // underscores or any of the following characters: =,.@-
+ //
+ // This member is required.
+ RoleSessionName *string
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the
+ // identity provider. Your application must get this token by authenticating the
+ // user who is using your application with a web identity provider before the
+ // application makes an AssumeRoleWithWebIdentity call.
+ //
+ // This member is required.
+ WebIdentityToken *string
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify a
+ // session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a Role
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide. By default, the value is set to 3600 seconds. The
+ // DurationSeconds parameter is separate from the duration of a console session
+ // that you might request using the returned credentials. The request to the
+ // federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the AWS
+ // Management Console
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent AWS API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide. The plain text that you use for both inline and managed
+ // session policies can't exceed 2,048 characters. The JSON policy characters can
+ // be any ASCII character from the space character to the end of the valid
+ // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
+ // linefeed (\u000A), and carriage return (\u000D) characters. An AWS conversion
+ // compresses the passed session policies and session tags into a packed binary
+ // format that has a separate limit. Your request can fail for this limit even if
+ // your plain text meets the other requirements. The PackedPolicySize response
+ // element indicates by percentage how close the policies and tags for your request
+ // are to the upper size limit.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role. This parameter is optional. You can provide up to 10 managed policy
+ // ARNs. However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. For more information about ARNs, see
+ // Amazon Resource Names (ARNs) and AWS Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
+ // the AWS General Reference. An AWS conversion compresses the passed session
+ // policies and session tags into a packed binary format that has a separate limit.
+ // Your request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit.
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent AWS API calls to access resources in the account that
+ // owns the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []types.PolicyDescriptorType
+
+ // The fully qualified host component of the domain name of the identity provider.
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth 2.0
+ // access tokens. Do not include URL schemes and port numbers. Do not specify this
+ // value for OpenID Connect ID tokens.
+ ProviderId *string
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials. For
+ // example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the
+ // RoleSessionName that you specified when you called AssumeRole.
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The intended audience (also known as client ID) of the web identity token. This
+ // is traditionally the client identifier issued to the application that requested
+ // the web identity token.
+ Audience *string
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token. The size of the security token that STS API
+ // operations return is not fixed. We strongly recommend that you make no
+ // assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // The issuing authority of the web identity token presented. For OpenID Connect ID
+ // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens,
+ // this contains the value of the ProviderId parameter that was passed in the
+ // AssumeRoleWithWebIdentity request.
+ Provider *string
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with the
+ // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user
+ // and the application that acquired the WebIdentityToken (pairwise identifier).
+ // For OpenID Connect ID tokens, this field contains the value returned by the
+ // identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "AssumeRoleWithWebIdentity",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
new file mode 100644
index 000000000..a6069be32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -0,0 +1,148 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Decodes additional information about the authorization status of a request from
+// an encoded message returned in response to an AWS request. For example, if a
+// user is not authorized to perform an operation that he or she has requested, the
+// request returns a Client.UnauthorizedOperation response (an HTTP 403 response).
+// Some AWS operations additionally return an encoded message that can provide
+// details about this authorization failure. Only certain AWS operations return an
+// encoded authorization message. The documentation for an individual operation
+// indicates whether that operation returns an encoded message in addition to
+// returning an HTTP code. The message is encoded because the details of the
+// authorization status can constitute privileged information that the user who
+// requested the operation should not see. To decode an authorization status
+// message, a user must be granted permissions via an IAM policy to request the
+// DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. The decoded
+// message includes the following type of information:
+//
+// * Whether the request was
+// denied due to an explicit deny or due to the absence of an explicit allow. For
+// more information, see Determining Whether a Request is Allowed or Denied
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested
+// action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the
+// context of the user's request.
+func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) {
+ if params == nil {
+ params = &DecodeAuthorizationMessageInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, addOperationDecodeAuthorizationMessageMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DecodeAuthorizationMessageOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DecodeAuthorizationMessageInput struct {
+
+ // The encoded message that was returned with the response.
+ //
+ // This member is required.
+ EncodedMessage *string
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "DecodeAuthorizationMessage",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
new file mode 100644
index 000000000..0a537cf14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -0,0 +1,137 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the account identifier for the specified access key ID. Access keys
+// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a
+// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For
+// more information about access keys, see Managing Access Keys for IAM Users
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide. When you pass an access key ID to this operation, it
+// returns the ID of the AWS account to which the keys belong. Access key IDs
+// beginning with AKIA are long-term credentials for an IAM user or the AWS account
+// root user. Access key IDs beginning with ASIA are temporary credentials that are
+// created using STS operations. If the account in the response belongs to you, you
+// can sign in as the root user and review your root user access keys. Then, you
+// can pull a credentials report
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail logs
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide. This operation does not indicate the state of the access
+// key. The key might be active, inactive, or deleted. Active keys might not have
+// permissions to perform an operation. Providing a deleted access key might return
+// an error that the key doesn't exist.
+func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) {
+ if params == nil {
+ params = &GetAccessKeyInfoInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, addOperationGetAccessKeyInfoMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetAccessKeyInfoOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetAccessKeyInfoInput struct {
+
+ // The identifier of an access key. This parameter allows (through its regex
+ // pattern) a string of characters that can consist of any upper- or lowercase
+ // letter or digit.
+ //
+ // This member is required.
+ AccessKeyId *string
+}
+
+type GetAccessKeyInfoOutput struct {
+
+ // The number used to identify the AWS account.
+ Account *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "GetAccessKeyInfo",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
new file mode 100644
index 000000000..6f4a73e7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -0,0 +1,153 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns details about the IAM user or role whose credentials are used to call
+// the operation. No permissions are required to perform this operation. If an
+// administrator adds a policy to your IAM user or role that explicitly denies
+// access to the sts:GetCallerIdentity action, you can still perform this
+// operation. Permissions are not required because the same information is returned
+// when an IAM user or role is denied access. To view an example response, see I Am
+// Not Authorized to Perform: iam:DeleteVirtualMFADevice
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) {
+ if params == nil {
+ params = &GetCallerIdentityInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, addOperationGetCallerIdentityMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetCallerIdentityOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetCallerIdentityInput struct {
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+type GetCallerIdentityOutput struct {
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string
+
+ // The unique identifier of the calling entity. The exact value depends on the type
+ // of entity that is making the call. The values returned are those listed in the
+ // aws:userid column in the Principal table
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "GetCallerIdentity",
+ }
+}
+
+// PresignGetCallerIdentity is used to generate a presigned HTTP Request which
+// contains presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &GetCallerIdentityInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns,
+ addOperationGetCallerIdentityMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
new file mode 100644
index 000000000..415352578
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -0,0 +1,310 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials (consisting of an access key ID,
+// a secret access key, and a security token) for a federated user. A typical use
+// is in a proxy application that gets temporary security credentials on behalf of
+// distributed applications inside a corporate network. You must call the
+// GetFederationToken operation using the long-term security credentials of an IAM
+// user. As a result, this call is appropriate in contexts where those credentials
+// can be safely stored, usually in a server-based application. For a comparison of
+// GetFederationToken with the other API operations that produce temporary
+// credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. You can create a mobile-based or browser-based app that
+// can authenticate users using a web identity provider like Login with Amazon,
+// Facebook, Google, or an OpenID Connect-compatible identity provider. In this
+// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/)
+// or AssumeRoleWithWebIdentity. For more information, see Federation Through a
+// Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide. You can also call GetFederationToken using the security
+// credentials of an AWS account root user, but we do not recommend it. Instead, we
+// recommend that you create an IAM user for the purpose of the proxy application.
+// Then attach a policy to the IAM user that limits federated users to only the
+// actions and resources that they need to access. For more information, see IAM
+// Best Practices
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the
+// IAM User Guide. Session duration The temporary credentials are valid for the
+// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours). The default session duration is 43,200 seconds (12 hours).
+// Temporary credentials that are obtained by using AWS account root user
+// credentials have a maximum duration of 3,600 seconds (1 hour). Permissions You
+// can use the temporary credentials created by GetFederationToken in any AWS
+// service except the following:
+//
+// * You cannot call any IAM operations using the
+// AWS CLI or the AWS API.
+//
+// * You cannot call any STS operations except
+// GetCallerIdentity.
+//
+// You must pass an inline or managed session policy
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to use as
+// managed session policies. The plain text that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Though the session
+// policy parameters are optional, if you do not pass a policy, then the resulting
+// federated user session has no permissions. When you pass session policies, the
+// session permissions are the intersection of the IAM user policies and the
+// session policies that you pass. This gives you a way to further restrict the
+// permissions for a federated user. You cannot use session policies to grant more
+// permissions than those that are defined in the permissions policy of the IAM
+// user. For more information, see Session Policies
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to create
+// temporary security credentials, see GetFederationToken—Federation Through a
+// Custom Identity Broker
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session in the
+// Principal element of the policy, the session has the permissions allowed by the
+// policy. These permissions are granted in addition to the permissions granted by
+// the session policies. Tags (Optional) You can pass tag key-value pairs to your
+// session. These are called session tags. For more information about session tags,
+// see Passing Session Tags in STS
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide. An administrator must grant you the permissions necessary to
+// pass session tags. The administrator can also create granular permissions to
+// allow you to pass only specific session tags. For more information, see
+// Tutorial: Using Tags for Attribute-Based Access Control
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is
+// preserved. This means that you cannot have separate Department and department
+// tag keys. Assume that the user that you are federating has the
+// Department=Marketing tag and you pass the department=engineering session tag.
+// Department and department are not saved as separate tags, and the session tag
+// passed in the request takes precedence over the user tag.
+func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
+ if params == nil {
+ params = &GetFederationTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, addOperationGetFederationTokenMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetFederationTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetFederationTokenInput struct {
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference the
+ // federated user name in a resource-based policy, such as in an Amazon S3 bucket
+ // policy. The regex used to validate this parameter is a string of characters
+ // consisting of upper- and lower-case alphanumeric characters with no spaces. You
+ // can also include underscores or any of the following characters: =,.@-
+ //
+ // This member is required.
+ Name *string
+
+ // The duration, in seconds, that the session should last. Acceptable durations for
+ // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36
+ // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using
+ // AWS account root user credentials are restricted to a maximum of 3,600 seconds
+ // (one hour). If the specified duration is longer than one hour, the session
+ // obtained by using root user credentials defaults to one hour.
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ // You must pass an inline or managed session policy
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to use as
+ // managed session policies. This parameter is optional. However, if you do not
+ // pass any session policies, then the resulting federated user session has no
+ // permissions. When you pass session policies, the session permissions are the
+ // intersection of the IAM user policies and the session policies that you pass.
+ // This gives you a way to further restrict the permissions for a federated user.
+ // You cannot use session policies to grant more permissions than those that are
+ // defined in the permissions policy of the IAM user. For more information, see
+ // Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide. The resulting credentials can be used to access a
+ // resource that has a resource-based policy. If that policy specifically
+ // references the federated user session in the Principal element of the policy,
+ // the session has the permissions allowed by the policy. These permissions are
+ // granted in addition to the permissions that are granted by the session policies.
+ // The plain text that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters. An AWS conversion compresses the passed session
+ // policies and session tags into a packed binary format that has a separate limit.
+ // Your request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as a managed session policy. The policies must exist in the same account as
+ // the IAM user that is requesting federated access. You must pass an inline or
+ // managed session policy
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to use as
+ // managed session policies. The plain text that you use for both inline and
+ // managed session policies can't exceed 2,048 characters. You can provide up to 10
+ // managed policy ARNs. For more information about ARNs, see Amazon Resource Names
+ // (ARNs) and AWS Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
+ // the AWS General Reference. This parameter is optional. However, if you do not
+ // pass any session policies, then the resulting federated user session has no
+ // permissions. When you pass session policies, the session permissions are the
+ // intersection of the IAM user policies and the session policies that you pass.
+ // This gives you a way to further restrict the permissions for a federated user.
+ // You cannot use session policies to grant more permissions than those that are
+ // defined in the permissions policy of the IAM user. For more information, see
+ // Session Policies
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide. The resulting credentials can be used to access a
+ // resource that has a resource-based policy. If that policy specifically
+ // references the federated user session in the Principal element of the policy,
+ // the session has the permissions allowed by the policy. These permissions are
+ // granted in addition to the permissions that are granted by the session policies.
+ // An AWS conversion compresses the passed session policies and session tags into a
+ // packed binary format that has a separate limit. Your request can fail for this
+ // limit even if your plain text meets the other requirements. The PackedPolicySize
+ // response element indicates by percentage how close the policies and tags for
+ // your request are to the upper size limit.
+ PolicyArns []types.PolicyDescriptorType
+
+ // A list of session tags. Each session tag consists of a key name and an
+ // associated value. For more information about session tags, see Passing Session
+ // Tags in STS
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+ // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+ // The plain text session tag keys can’t exceed 128 characters and the values can’t
+ // exceed 256 characters. For these and additional limits, see IAM and STS
+ // Character Limits
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide. An AWS conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plain text meets the other
+ // requirements. The PackedPolicySize response element indicates by percentage how
+ // close the policies and tags for your request are to the upper size limit. You
+ // can pass a session tag with the same key as a tag that is already attached to
+ // the user you are federating. When you do, session tags override a user tag with
+ // the same key. Tag key–value pairs are not case sensitive, but case is preserved.
+ // This means that you cannot have separate Department and department tag keys.
+ // Assume that the role has the Department=Marketing tag and you pass the
+ // department=engineering session tag. Department and department are not saved as
+ // separate tags, and the session tag passed in the request takes precedence over
+ // the role tag.
+ Tags []types.Tag
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token. The size of the security token
+ // that STS API operations return is not fixed. We strongly recommend that you make
+ // no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // Identifiers for the federated user associated with the credentials (such as
+ // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use
+ // the federated user's ARN in your resource-based policies, such as an Amazon S3
+ // bucket policy.
+ FederatedUser *types.FederatedUser
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "GetFederationToken",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
new file mode 100644
index 000000000..45dd19704
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -0,0 +1,187 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA code
+// that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make
+// programmatic calls to API operations that require MFA authentication. If you do
+// not supply a correct MFA code, then the API returns an access denied error. For
+// a comparison of GetSessionToken with the other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. Session Duration The GetSessionToken operation must be
+// called by using the long-term AWS security credentials of the AWS account root
+// user or an IAM user. Credentials that are created by IAM users are valid for the
+// duration that you specify. This duration can range from 900 seconds (15 minutes)
+// up to a maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds
+// (12 hours). Credentials based on account credentials can range from 900 seconds
+// (15 minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any AWS service with the following exceptions:
+//
+// * You cannot
+// call any IAM API operations unless MFA authentication information is included in
+// the request.
+//
+// * You cannot call any STS API except AssumeRole or
+// GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with AWS
+// account root user credentials. Instead, follow our best practices
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions, and
+// using IAM users for everyday interaction with AWS. The credentials that are
+// returned by GetSessionToken are based on permissions associated with the user
+// whose credentials were used to call the operation. If GetSessionToken is called
+// using AWS account root user credentials, the temporary credentials have root
+// user permissions. Similarly, if GetSessionToken is called using the credentials
+// of an IAM user, the temporary credentials have the same permissions as the IAM
+// user. For more information about using GetSessionToken to create temporary
+// credentials, go to Temporary Credentials for Users in Untrusted Environments
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
+ if params == nil {
+ params = &GetSessionTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, addOperationGetSessionTokenMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetSessionTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetSessionTokenInput struct {
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+ // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for
+ // AWS account owners are restricted to a maximum of 3,600 seconds (one hour). If
+ // the duration is longer than one hour, the session for AWS account owners
+ // defaults to one hour.
+ DurationSeconds *int32
+
+ // The identification number of the MFA device that is associated with the IAM user
+ // who is making the GetSessionToken call. Specify this value if the IAM user has a
+ // policy that requires MFA authentication. The value is either the serial number
+ // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN)
+ // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find
+ // the device for an IAM user by going to the AWS Management Console and viewing
+ // the user's security credentials. The regex used to validate this parameter is a
+ // string of characters consisting of upper- and lower-case alphanumeric characters
+ // with no spaces. You can also include underscores or any of the following
+ // characters: =,.@:/-
+ SerialNumber *string
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication is
+ // required, the user must provide a code when requesting a set of temporary
+ // security credentials. A user who fails to provide the code receives an "access
+ // denied" response when requesting resources that require MFA authentication. The
+ // format for this parameter, as described by its regex pattern, is a sequence of
+ // six numeric digits.
+ TokenCode *string
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token. The size of the security token
+ // that STS API operations return is not fixed. We strongly recommend that you make
+ // no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+}
+
+func addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "sts",
+ OperationName: "GetSessionToken",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
new file mode 100644
index 000000000..cfa3c38c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
@@ -0,0 +1,2468 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strconv"
+ "strings"
+)
+
+type awsAwsquery_deserializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRole) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata)
+ }
+ output := &AssumeRoleOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLargeException", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata)
+ }
+ output := &AssumeRoleWithSAMLOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleWithSAMLResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("IDPRejectedClaimException", errorCode):
+ return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+ case strings.EqualFold("InvalidIdentityTokenException", errorCode):
+ return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLargeException", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata)
+ }
+ output := &AssumeRoleWithWebIdentityOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("IDPCommunicationErrorException", errorCode):
+ return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody)
+
+ case strings.EqualFold("IDPRejectedClaimException", errorCode):
+ return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+ case strings.EqualFold("InvalidIdentityTokenException", errorCode):
+ return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLargeException", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct {
+}
+
+func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata)
+ }
+ output := &DecodeAuthorizationMessageOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("DecodeAuthorizationMessageResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("InvalidAuthorizationMessageException", errorCode):
+ return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetAccessKeyInfo struct {
+}
+
+func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata)
+ }
+ output := &GetAccessKeyInfoOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetAccessKeyInfoResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetCallerIdentity struct {
+}
+
+func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata)
+ }
+ output := &GetCallerIdentityOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetCallerIdentityResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetFederationToken struct {
+}
+
+func (*awsAwsquery_deserializeOpGetFederationToken) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata)
+ }
+ output := &GetFederationTokenOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetFederationTokenResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLargeException", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetSessionToken struct {
+}
+
+func (*awsAwsquery_deserializeOpGetSessionToken) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata)
+ }
+ output := &GetSessionTokenOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetSessionTokenResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ExpiredTokenException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.IDPCommunicationErrorException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.IDPRejectedClaimException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidAuthorizationMessageException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidIdentityTokenException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.MalformedPolicyDocumentException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.PackedPolicyTooLargeException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.RegionDisabledException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AssumedRoleUser
+ if *v == nil {
+ sv = &types.AssumedRoleUser{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("AssumedRoleId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AssumedRoleId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Credentials
+ if *v == nil {
+ sv = &types.Credentials{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessKeyId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessKeyId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Expiration", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Expiration = ptr.Time(t)
+ }
+
+ case strings.EqualFold("SecretAccessKey", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SecretAccessKey = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SessionToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SessionToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ExpiredTokenException
+ if *v == nil {
+ sv = &types.ExpiredTokenException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.FederatedUser
+ if *v == nil {
+ sv = &types.FederatedUser{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("FederatedUserId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.FederatedUserId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IDPCommunicationErrorException
+ if *v == nil {
+ sv = &types.IDPCommunicationErrorException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IDPRejectedClaimException
+ if *v == nil {
+ sv = &types.IDPRejectedClaimException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidAuthorizationMessageException
+ if *v == nil {
+ sv = &types.InvalidAuthorizationMessageException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidIdentityTokenException
+ if *v == nil {
+ sv = &types.InvalidIdentityTokenException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MalformedPolicyDocumentException
+ if *v == nil {
+ sv = &types.MalformedPolicyDocumentException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PackedPolicyTooLargeException
+ if *v == nil {
+ sv = &types.PackedPolicyTooLargeException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RegionDisabledException
+ if *v == nil {
+ sv = &types.RegionDisabledException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleOutput
+ if *v == nil {
+ sv = &AssumeRoleOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleWithSAMLOutput
+ if *v == nil {
+ sv = &AssumeRoleWithSAMLOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Audience", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Audience = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Issuer", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Issuer = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NameQualifier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NameQualifier = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Subject", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Subject = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SubjectType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SubjectType = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleWithWebIdentityOutput
+ if *v == nil {
+ sv = &AssumeRoleWithWebIdentityOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Audience", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Audience = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Provider", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Provider = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SubjectFromWebIdentityToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *DecodeAuthorizationMessageOutput
+ if *v == nil {
+ sv = &DecodeAuthorizationMessageOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DecodedMessage", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DecodedMessage = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetAccessKeyInfoOutput
+ if *v == nil {
+ sv = &GetAccessKeyInfoOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetCallerIdentityOutput
+ if *v == nil {
+ sv = &GetCallerIdentityOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UserId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UserId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetFederationTokenOutput
+ if *v == nil {
+ sv = &GetFederationTokenOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("FederatedUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetSessionTokenOutput
+ if *v == nil {
+ sv = &GetSessionTokenOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
new file mode 100644
index 000000000..b5a0a7151
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
@@ -0,0 +1,12 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sts provides the API client, operations, and parameter types for AWS
+// Security Token Service.
+//
+// AWS Security Token Service AWS Security Token Service (STS) enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more information about
+// using this service, see Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
new file mode 100644
index 000000000..5b6aa7b17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
@@ -0,0 +1,160 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/url"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+ if o.EndpointResolver != nil {
+ return
+ }
+ o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), m.Options)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "sts"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolver
+ resolver EndpointResolver
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ if w.awsResolver == nil {
+ goto fallback
+ }
+ endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region)
+ if err == nil {
+ return endpoint, nil
+ }
+
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
+ return endpoint, err
+ }
+
+fallback:
+ if w.resolver == nil {
+ return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
+ }
+ return w.resolver.ResolveEndpoint(region, options)
+}
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint
+// resolution to the awsResolver. If awsResolver returns aws.EndpointNotFoundError
+// error, the resolver will use the provided fallbackResolver for resolution.
+// awsResolver and fallbackResolver must not be nil
+func withEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver {
+ return &wrappedEndpointResolver{
+ awsResolver: awsResolver,
+ resolver: fallbackResolver,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.mod b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.mod
new file mode 100644
index 000000000..57fd395ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.mod
@@ -0,0 +1,13 @@
+module github.com/aws/aws-sdk-go-v2/service/sts
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.2.1
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3
+ github.com/aws/smithy-go v1.2.0
+)
+
+replace github.com/aws/aws-sdk-go-v2 => ../../
+
+replace github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => ../../service/internal/presigned-url/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.sum b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.sum
new file mode 100644
index 000000000..c3783ae60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go.sum
@@ -0,0 +1,13 @@
+github.com/aws/smithy-go v1.2.0 h1:0PoGBWXkXDIyVdPaZW9gMhaGzj3UOAgTdiVoHuuZAFA=
+github.com/aws/smithy-go v1.2.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..262f289bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -0,0 +1,169 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ DisableHTTPS bool
+}
+
+// Resolver STS endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := endpoints.Options{
+ DisableHTTPS: options.DisableHTTPS,
+ }
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: endpoints.Endpoint{
+ Hostname: "sts.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "af-south-1": endpoints.Endpoint{},
+ "ap-east-1": endpoints.Endpoint{},
+ "ap-northeast-1": endpoints.Endpoint{},
+ "ap-northeast-2": endpoints.Endpoint{},
+ "ap-south-1": endpoints.Endpoint{},
+ "ap-southeast-1": endpoints.Endpoint{},
+ "ap-southeast-2": endpoints.Endpoint{},
+ "aws-global": endpoints.Endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "ca-central-1": endpoints.Endpoint{},
+ "eu-central-1": endpoints.Endpoint{},
+ "eu-north-1": endpoints.Endpoint{},
+ "eu-south-1": endpoints.Endpoint{},
+ "eu-west-1": endpoints.Endpoint{},
+ "eu-west-2": endpoints.Endpoint{},
+ "eu-west-3": endpoints.Endpoint{},
+ "me-south-1": endpoints.Endpoint{},
+ "sa-east-1": endpoints.Endpoint{},
+ "us-east-1": endpoints.Endpoint{},
+ "us-east-1-fips": endpoints.Endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoints.Endpoint{},
+ "us-east-2-fips": endpoints.Endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoints.Endpoint{},
+ "us-west-1-fips": endpoints.Endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoints.Endpoint{},
+ "us-west-2-fips": endpoints.Endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: endpoints.Endpoint{
+ Hostname: "sts.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "cn-north-1": endpoints.Endpoint{},
+ "cn-northwest-1": endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso",
+ Defaults: endpoints.Endpoint{
+ Hostname: "sts.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "us-iso-east-1": endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: endpoints.Endpoint{
+ Hostname: "sts.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "us-isob-east-1": endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: endpoints.Endpoint{
+ Hostname: "sts.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ RegionRegex: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ "us-gov-east-1": endpoints.Endpoint{},
+ "us-gov-east-1-fips": endpoints.Endpoint{
+ Hostname: "sts.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoints.Endpoint{},
+ "us-gov-west-1-fips": endpoints.Endpoint{
+ Hostname: "sts.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
new file mode 100644
index 000000000..b224780f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
@@ -0,0 +1,765 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsAwsquery_serializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRole) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRole")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoleWithSAML")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoleWithWebIdentity")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpDecodeAuthorizationMessage struct {
+}
+
+func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("DecodeAuthorizationMessage")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetAccessKeyInfo struct {
+}
+
+func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetAccessKeyInfo")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetCallerIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetCallerIdentityInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetCallerIdentity")
+ body.Key("Version").String("2011-06-15")
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetFederationToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetFederationToken) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetFederationTokenInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetFederationToken")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetSessionToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetSessionToken) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetSessionTokenInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ request.Request.URL.Path = "/"
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetSessionToken")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ return next.HandleSerialize(ctx, in)
+}
+func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error {
+ if len(v) == 0 {
+ return nil
+ }
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.Arn != nil {
+ objectKey := object.Key("arn")
+ objectKey.String(*v.Arn)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.Key != nil {
+ objectKey := object.Key("Key")
+ objectKey.String(*v.Key)
+ }
+
+ if v.Value != nil {
+ objectKey := object.Key("Value")
+ objectKey.String(*v.Value)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error {
+ if len(v) == 0 {
+ return nil
+ }
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error {
+ if len(v) == 0 {
+ return nil
+ }
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.ExternalId != nil {
+ objectKey := object.Key("ExternalId")
+ objectKey.String(*v.ExternalId)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.RoleSessionName != nil {
+ objectKey := object.Key("RoleSessionName")
+ objectKey.String(*v.RoleSessionName)
+ }
+
+ if v.SerialNumber != nil {
+ objectKey := object.Key("SerialNumber")
+ objectKey.String(*v.SerialNumber)
+ }
+
+ if v.Tags != nil {
+ objectKey := object.Key("Tags")
+ if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.TokenCode != nil {
+ objectKey := object.Key("TokenCode")
+ objectKey.String(*v.TokenCode)
+ }
+
+ if v.TransitiveTagKeys != nil {
+ objectKey := object.Key("TransitiveTagKeys")
+ if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.PrincipalArn != nil {
+ objectKey := object.Key("PrincipalArn")
+ objectKey.String(*v.PrincipalArn)
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.SAMLAssertion != nil {
+ objectKey := object.Key("SAMLAssertion")
+ objectKey.String(*v.SAMLAssertion)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.ProviderId != nil {
+ objectKey := object.Key("ProviderId")
+ objectKey.String(*v.ProviderId)
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.RoleSessionName != nil {
+ objectKey := object.Key("RoleSessionName")
+ objectKey.String(*v.RoleSessionName)
+ }
+
+ if v.WebIdentityToken != nil {
+ objectKey := object.Key("WebIdentityToken")
+ objectKey.String(*v.WebIdentityToken)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.EncodedMessage != nil {
+ objectKey := object.Key("EncodedMessage")
+ objectKey.String(*v.EncodedMessage)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.AccessKeyId != nil {
+ objectKey := object.Key("AccessKeyId")
+ objectKey.String(*v.AccessKeyId)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Name != nil {
+ objectKey := object.Key("Name")
+ objectKey.String(*v.Name)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.Tags != nil {
+ objectKey := object.Key("Tags")
+ if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.SerialNumber != nil {
+ objectKey := object.Key("SerialNumber")
+ objectKey.String(*v.SerialNumber)
+ }
+
+ if v.TokenCode != nil {
+ objectKey := object.Key("TokenCode")
+ objectKey.String(*v.TokenCode)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
new file mode 100644
index 000000000..3734fa4ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
@@ -0,0 +1,178 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// The web identity token that was passed is expired or is not valid. Get a new
+// identity token from the identity provider and then retry the request.
+type ExpiredTokenException struct {
+ Message *string
+}
+
+func (e *ExpiredTokenException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredTokenException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" }
+func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request could not be fulfilled because the identity provider (IDP) that was
+// asked to verify the incoming identity token could not be reached. This is often
+// a transient error caused by network conditions. Retry the request a limited
+// number of times so that you don't exceed the request rate. If the error
+// persists, the identity provider might be down or not responding.
+type IDPCommunicationErrorException struct {
+ Message *string
+}
+
+func (e *IDPCommunicationErrorException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPCommunicationErrorException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IDPCommunicationErrorException) ErrorCode() string { return "IDPCommunicationErrorException" }
+func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The identity provider (IdP) reported that authentication failed. This might be
+// because the claim is invalid. If this error is returned for the
+// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired
+// or has been explicitly revoked.
+type IDPRejectedClaimException struct {
+ Message *string
+}
+
+func (e *IDPRejectedClaimException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPRejectedClaimException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IDPRejectedClaimException) ErrorCode() string { return "IDPRejectedClaimException" }
+func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+type InvalidAuthorizationMessageException struct {
+ Message *string
+}
+
+func (e *InvalidAuthorizationMessageException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidAuthorizationMessageException) ErrorCode() string {
+ return "InvalidAuthorizationMessageException"
+}
+func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// The web identity token that was passed could not be validated by AWS. Get a new
+// identity token from the identity provider and then retry the request.
+type InvalidIdentityTokenException struct {
+ Message *string
+}
+
+func (e *InvalidIdentityTokenException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidIdentityTokenException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidIdentityTokenException) ErrorCode() string { return "InvalidIdentityTokenException" }
+func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+type MalformedPolicyDocumentException struct {
+ Message *string
+}
+
+func (e *MalformedPolicyDocumentException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *MalformedPolicyDocumentException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+ return "MalformedPolicyDocumentException"
+}
+func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by
+// percentage how close the policies and tags are to the upper size limit. For more
+// information, see Passing Session Tags in STS
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide. You could receive this error even though you meet other defined
+// session policy and session tag limits. For more information, see IAM and STS
+// Entity Character Limits
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+type PackedPolicyTooLargeException struct {
+ Message *string
+}
+
+func (e *PackedPolicyTooLargeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *PackedPolicyTooLargeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *PackedPolicyTooLargeException) ErrorCode() string { return "PackedPolicyTooLargeException" }
+func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// STS is not activated in the requested region for the account that is being asked
+// to generate credentials. The account administrator must use the IAM console to
+// activate STS in that region. For more information, see Activating and
+// Deactivating AWS STS in an AWS Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+type RegionDisabledException struct {
+ Message *string
+}
+
+func (e *RegionDisabledException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *RegionDisabledException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *RegionDisabledException) ErrorCode() string { return "RegionDisabledException" }
+func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
new file mode 100644
index 000000000..b4dc167f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
@@ -0,0 +1,110 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "time"
+)
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in
+ // the IAM User Guide.
+ //
+ // This member is required.
+ Arn *string
+
+ // A unique identifier that contains the role ID and the role session name of the
+ // role that is being assumed. The role ID is generated by AWS when the role is
+ // created.
+ //
+ // This member is required.
+ AssumedRoleId *string
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // This member is required.
+ AccessKeyId *string
+
+ // The date on which the current credentials expire.
+ //
+ // This member is required.
+ Expiration *time.Time
+
+ // The secret access key that can be used to sign requests.
+ //
+ // This member is required.
+ SecretAccessKey *string
+
+ // The token that users must pass to the service API to use the temporary
+ // credentials.
+ //
+ // This member is required.
+ SessionToken *string
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+
+ // The ARN that specifies the federated user that is associated with the
+ // credentials. For more information about ARNs and how to use them in policies,
+ // see IAM Identifiers
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in
+ // the IAM User Guide.
+ //
+ // This member is required.
+ Arn *string
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // This member is required.
+ FederatedUserId *string
+}
+
+// A reference to the IAM managed policy that is passed as a session policy for a
+// role session or a federated user session.
+type PolicyDescriptorType struct {
+
+ // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+ // policy for the role. For more information about ARNs, see Amazon Resource Names
+ // (ARNs) and AWS Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
+ // the AWS General Reference.
+ Arn *string
+}
+
+// You can pass custom key-value pair attributes when you assume a role or federate
+// a user. These are called session tags. You can then use the session tags to
+// control access to resources. For more information, see Tagging AWS STS Sessions
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+// IAM User Guide.
+type Tag struct {
+
+ // The key for a session tag. You can pass up to 50 session tags. The plain text
+ // session tag keys can’t exceed 128 characters. For these and additional limits,
+ // see IAM and STS Character Limits
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // This member is required.
+ Key *string
+
+ // The value for a session tag. You can pass up to 50 session tags. The plain text
+ // session tag values can’t exceed 256 characters. For these and additional limits,
+ // see IAM and STS Character Limits
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // This member is required.
+ Value *string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
new file mode 100644
index 000000000..3e4bad2a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
@@ -0,0 +1,305 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpAssumeRole struct {
+}
+
+func (*validateOpAssumeRole) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithSAML struct {
+}
+
+func (*validateOpAssumeRoleWithSAML) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleWithSAMLInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*validateOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDecodeAuthorizationMessage struct {
+}
+
+func (*validateOpDecodeAuthorizationMessage) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDecodeAuthorizationMessageInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetAccessKeyInfo struct {
+}
+
+func (*validateOpGetAccessKeyInfo) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetAccessKeyInfoInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetFederationToken struct {
+}
+
+func (*validateOpGetFederationToken) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetFederationTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetFederationTokenInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After)
+}
+
+func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After)
+}
+
+func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After)
+}
+
+func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After)
+}
+
+func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After)
+}
+
+func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After)
+}
+
+func validateTag(v *types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Value == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Value"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagListType(v []types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagListType"}
+ for i := range v {
+ if err := validateTag(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleInput(v *AssumeRoleInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.RoleSessionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+ }
+ if v.Tags != nil {
+ if err := validateTagListType(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.PrincipalArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn"))
+ }
+ if v.SAMLAssertion == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.RoleSessionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+ }
+ if v.WebIdentityToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"}
+ if v.EncodedMessage == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"}
+ if v.AccessKeyId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"}
+ if v.Name == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Name"))
+ }
+ if v.Tags != nil {
+ if err := validateTagListType(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore
new file mode 100644
index 000000000..c01141aa4
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/.gitignore
@@ -0,0 +1,22 @@
+# Eclipse
+.classpath
+.project
+.settings/
+
+# Intellij
+.idea/
+*.iml
+*.iws
+
+# Mac
+.DS_Store
+
+# Maven
+target/
+**/dependency-reduced-pom.xml
+
+# Gradle
+/.gradle
+build/
+*/out/
+*/*/out/
diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml
new file mode 100644
index 000000000..b02e9903d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/.travis.yml
@@ -0,0 +1,37 @@
+language: java
+sudo: true
+dist: bionic
+
+install:
+ - eval "$(curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | GIMME_GO_VERSION=1.16.x bash)"
+ - echo `go env`
+ - echo `which go`
+
+script: cd codegen && ./gradlew clean build -Plog-tests
+
+matrix:
+ include:
+ - language: java
+ go: 1.16.x
+ jdk: openjdk8
+
+ - language: java
+ go: 1.16.x
+ jdk: openjdk11
+
+ - language: go
+ go: 1.15.x
+ script: go test -v ./...
+
+ - language: go
+ go: 1.16.x
+ script: go test -v ./...
+
+ allow_failures:
+ - language: go
+ go: tip
+ script: go test -v ./...
+
+branches:
+ only:
+ - main
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
new file mode 100644
index 000000000..8a35893d1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -0,0 +1,12 @@
+# Release 2021-03-12 v1.2.0
+
+### Smithy Go module
+* Fix support for parsing shortened year format in HTTP Date header.
+* Fix GitHub APIDiff action workflow to get gorelease tool correctly.
+* Fix codegen artifact unit test for Go 1.16
+
+### Codegen
+* Fix generating paginator nil parameter handling before usage.
+* Fix Serialize unboxed members decorated as required.
+* Add ability to define resolvers at both client construction and operation invocation.
+* Support for extending paginators with custom runtime trait
diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..5b627cfa6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
new file mode 100644
index 000000000..c4b6a1c50
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
@@ -0,0 +1,59 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE
new file mode 100644
index 000000000..67db85882
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile
new file mode 100644
index 000000000..89dd6fbb0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/Makefile
@@ -0,0 +1,9 @@
+
+smithy-publish-local:
+ cd codegen && ./gradlew publishToMavenLocal
+
+smithy-build:
+ cd codegen && ./gradlew build
+
+smithy-clean:
+ cd codegen && ./gradlew clean
diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE
new file mode 100644
index 000000000..616fc5889
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
new file mode 100644
index 000000000..80c458dbd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -0,0 +1,10 @@
+## Smithy Go
+
+Smithy code generators for Go.
+
+**WARNING: All interfaces are subject to change.**
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 000000000..87b0c74b7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 000000000..bd9318805
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,8 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+type Document interface {
+ UnmarshalDocument(interface{}) error
+ GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go
new file mode 100644
index 000000000..792fdfa08
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/doc.go
@@ -0,0 +1,4 @@
+// Package encoding provides utilities for encoding values for specific
+// document encodings.
+
+package encoding
diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go
new file mode 100644
index 000000000..2fdfb5225
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol
+// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+ if math.IsInf(v, 0) || math.IsNaN(v) {
+ panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+ }
+
+ abs := math.Abs(v)
+ fmt := byte('f')
+
+ if abs != 0 {
+ if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+
+ dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+ if fmt == 'e' {
+ // clean up e-09 to e-9
+ n := len(dst)
+ if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+ dst[n-2] = dst[n-1]
+ dst = dst[:n-1]
+ }
+ }
+
+ return dst
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
new file mode 100644
index 000000000..70dc2e891
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -0,0 +1,105 @@
+package httpbinding
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+const contentLengthHeader = "Content-Length"
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+ path, rawPath, pathBuffer []byte
+
+ query url.Values
+ header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. All query and
+// header values will be added on top of the request's existing values. Overwriting
+// duplicate values.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+ parseQuery, err := url.ParseQuery(query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse query string: %w", err)
+ }
+
+ e := &Encoder{
+ path: []byte(path),
+ rawPath: []byte(path),
+ query: parseQuery,
+ header: headers.Clone(),
+ }
+
+ return e, nil
+}
+
+// Encode returns a REST protocol encoder for encoding HTTP bindings.
+//
+// Due net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly. Encode
+// will look for whether the header is present, and if so will remove it and set the respective value on http.Request.
+//
+// Returns any error if one occurred during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+ req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+ req.URL.RawQuery = e.query.Encode()
+
+ // net/http ignores Content-Length header and requires it to be set on http.Request
+ if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+ iv, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ req.ContentLength = iv
+ e.header.Del(contentLengthHeader)
+ }
+
+ req.Header = e.header
+
+ return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Header used encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+ return Headers{
+ header: e.header,
+ prefix: strings.TrimSpace(prefix),
+ }
+}
+
+// HasHeader returns if a header with the key specified exists with one or
+// more values.
+func (e Encoder) HasHeader(key string) bool {
+ return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+ return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, true)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
new file mode 100644
index 000000000..618ef6585
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
@@ -0,0 +1,112 @@
+package httpbinding
+
+import (
+ "encoding/base64"
+ "math/big"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+ header http.Header
+ prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+ return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+ header http.Header
+ key string
+ append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+ return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+ if h.append {
+ h.header[h.key] = append(h.header[h.key], value)
+ } else {
+ h.header[h.key] = append(h.header[h.key][:0], value)
+ }
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+ h.modifyHeader(v)
+}
+
+// Byte encodes the value v as a query string value
+func (h HeaderValue) Byte(v int8) {
+ h.Long(int64(v))
+}
+
+// Short encodes the value v as a query string value
+func (h HeaderValue) Short(v int16) {
+ h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+ h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+ h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as a query string value
+func (h HeaderValue) Boolean(v bool) {
+ h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as a query string value
+func (h HeaderValue) Float(v float32) {
+ h.float(float64(v), 32)
+}
+
+// Double encodes the value v as a query string value
+func (h HeaderValue) Double(v float64) {
+ h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+ h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+}
+
+// BigInteger encodes the value v as a query string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+ h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as a query string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ h.Long(i)
+ return
+ }
+ h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+ encodeToString := base64.StdEncoding.EncodeToString(v)
+ h.modifyHeader(encodeToString)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
new file mode 100644
index 000000000..e78926c9a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
@@ -0,0 +1,108 @@
+package httpbinding
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ uriTokenStart = '{'
+ uriTokenStop = '}'
+ uriTokenSkip = '+'
+)
+
+func bufCap(b []byte, n int) []byte {
+ if cap(b) < n {
+ return make([]byte, 0, n)
+ }
+
+ return b[0:0]
+}
+
+// replacePathElement replaces a single element in the path []byte.
+// Escape is used to control whether the value will be escaped using Amazon path escape style.
+func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) {
+ fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] }
+ fieldBuf = append(fieldBuf, uriTokenStart)
+ fieldBuf = append(fieldBuf, key...)
+
+ start := bytes.Index(path, fieldBuf)
+ end := start + len(fieldBuf)
+ if start < 0 || len(path[end:]) == 0 {
+ // TODO what to do about error?
+ return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path)
+ }
+
+ encodeSep := true
+ if path[end] == uriTokenSkip {
+ // '+' token means do not escape slashes
+ encodeSep = false
+ end++
+ }
+
+ if escape {
+ val = EscapePath(val, encodeSep)
+ }
+
+ if path[end] != uriTokenStop {
+ return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path)
+ }
+ end++
+
+ fieldBuf = bufCap(fieldBuf, len(val))
+ fieldBuf = append(fieldBuf, val...)
+
+ keyLen := end - start
+ valLen := len(fieldBuf)
+
+ if keyLen == valLen {
+ copy(path[start:], fieldBuf)
+ return path, fieldBuf, nil
+ }
+
+ newLen := len(path) + (valLen - keyLen)
+ if len(path) < newLen {
+ path = path[:cap(path)]
+ }
+ if cap(path) < newLen {
+ newURI := make([]byte, newLen)
+ copy(newURI, path)
+ path = newURI
+ }
+
+ // shift
+ copy(path[start+valLen:], path[end:])
+ path = path[:newLen]
+ copy(path[start:], fieldBuf)
+
+ return path, fieldBuf, nil
+}
+
+// EscapePath escapes part of a URL path in Amazon style.
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+var noEscape [256]bool
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go
new file mode 100644
index 000000000..ee7791f51
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go
@@ -0,0 +1,97 @@
+package httpbinding
+
+import (
+ "encoding/base64"
+ "math/big"
+ "net/url"
+ "strconv"
+)
+
+// QueryValue is used to encode query key values
+type QueryValue struct {
+ query url.Values
+ key string
+ append bool
+}
+
+// NewQueryValue creates a new QueryValue which enables encoding
+// a query value into the given url.Values.
+func NewQueryValue(query url.Values, key string, append bool) QueryValue {
+ return QueryValue{
+ query: query,
+ key: key,
+ append: append,
+ }
+}
+
+func (qv QueryValue) updateKey(value string) {
+ if qv.append {
+ qv.query.Add(qv.key, value)
+ } else {
+ qv.query.Set(qv.key, value)
+ }
+}
+
+// Blob encodes v as a base64 query string value
+func (qv QueryValue) Blob(v []byte) {
+ encodeToString := base64.StdEncoding.EncodeToString(v)
+ qv.updateKey(encodeToString)
+}
+
+// Boolean encodes v as a query string value
+func (qv QueryValue) Boolean(v bool) {
+ qv.updateKey(strconv.FormatBool(v))
+}
+
+// String encodes v as a query string value
+func (qv QueryValue) String(v string) {
+ qv.updateKey(v)
+}
+
+// Byte encodes v as a query string value
+func (qv QueryValue) Byte(v int8) {
+ qv.Long(int64(v))
+}
+
+// Short encodes v as a query string value
+func (qv QueryValue) Short(v int16) {
+ qv.Long(int64(v))
+}
+
+// Integer encodes v as a query string value
+func (qv QueryValue) Integer(v int32) {
+ qv.Long(int64(v))
+}
+
+// Long encodes v as a query string value
+func (qv QueryValue) Long(v int64) {
+ qv.updateKey(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (qv QueryValue) Float(v float32) {
+ qv.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (qv QueryValue) Double(v float64) {
+ qv.float(v, 64)
+}
+
+func (qv QueryValue) float(v float64, bitSize int) {
+ qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize))
+}
+
+// BigInteger encodes v as a query string value
+func (qv QueryValue) BigInteger(v *big.Int) {
+ qv.updateKey(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (qv QueryValue) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ qv.Long(i)
+ return
+ }
+ qv.updateKey(v.Text('e', -1))
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
new file mode 100644
index 000000000..89cfae6d6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
@@ -0,0 +1,98 @@
+package httpbinding
+
+import (
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// URIValue is used to encode named URI parameters
+type URIValue struct {
+ path, rawPath, buffer *[]byte
+
+ key string
+}
+
+func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue {
+ return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key}
+}
+
+func (u URIValue) modifyURI(value string) (err error) {
+ *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false)
+ *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true)
+ return err
+}
+
+// Boolean encodes v as a URI string value
+func (u URIValue) Boolean(v bool) error {
+ return u.modifyURI(strconv.FormatBool(v))
+}
+
+// String encodes v as a URI string value
+func (u URIValue) String(v string) error {
+ return u.modifyURI(v)
+}
+
+// Byte encodes v as a URI string value
+func (u URIValue) Byte(v int8) error {
+ return u.Long(int64(v))
+}
+
+// Short encodes v as a URI string value
+func (u URIValue) Short(v int16) error {
+ return u.Long(int64(v))
+}
+
+// Integer encodes v as a URI string value
+func (u URIValue) Integer(v int32) error {
+ return u.Long(int64(v))
+}
+
+// Long encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+ return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (u URIValue) Float(v float32) error {
+ return u.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (u URIValue) Double(v float64) error {
+ return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+ return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+}
+
+// BigInteger encodes v as a query string value
+func (u URIValue) BigInteger(v *big.Int) error {
+ return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ return u.Long(i)
+ }
+ return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+ queryStart := strings.IndexRune(uri, '?')
+ if queryStart == -1 {
+ path = uri
+ return path, query
+ }
+
+ path = uri[:queryStart]
+ if queryStart+1 >= len(uri) {
+ return path, query
+ }
+ query = uri[queryStart+1:]
+
+ return path, query
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 000000000..508f3c997
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+ Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of a XML array type
+type Array struct {
+ w writer
+ scratch *[]byte
+
+ // member start element is the array member wrapper start element
+ memberStartElement StartElement
+
+ // isFlattened indicates if the array is a flattened array.
+ isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element, array start element.
+// It takes in an isFlattened bool, indicating that the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+ var memberWrapper = memberStartElement
+ if isFlattened {
+ memberWrapper = arrayStartElement
+ }
+
+ return &Array{
+ w: w,
+ scratch: scratch,
+ memberStartElement: memberWrapper,
+ isFlattened: isFlattened,
+ }
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+ v := newValue(a.w, a.scratch, a.memberStartElement)
+ v.isFlattened = a.isFlattened
+ return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 000000000..ccee90a63
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+ leftAngleBracket = '<'
+ rightAngleBracket = '>'
+ forwardSlash = '/'
+ colon = ':'
+ equals = '='
+ quote = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 000000000..d6e1e41e1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to
+the shape serializer function in which an xml.Value will be passed around.
+
+Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#
+
+Member Element
+
+Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member element
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened element
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on value such as String(), Long() etc; auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping.
+By default, a wrapped array's members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped map has Array() method, which facilitate map member wrapping.
+By default, a wrapped map's members are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 000000000..ae84e7999
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+ Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+/*
+NewAttribute returns a pointer to an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+ return Attr{
+ Name: Name{
+ Local: local,
+ },
+ Value: value,
+ }
+}
+
+/*
+NewNamespaceAttribute returns a pointer to an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+ attr := NewAttribute(local, value)
+
+ // default name space identifier
+ attr.Name.Space = "xmlns"
+ return attr
+}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+ return EndElement{e.Name}
+}
+
+// returns true if start element local name is empty
+func (e StartElement) isZero() bool {
+ return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// returns true if end element local name is empty
+func (e EndElement) isZero() bool {
+ return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 000000000..16fb3dddb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+ // Write takes in a byte slice and returns number of bytes written and error
+ Write(p []byte) (n int, err error)
+
+ // WriteRune takes in a rune and returns number of bytes written and error
+ WriteRune(r rune) (n int, err error)
+
+ // WriteString takes in a string and returns number of bytes written and error
+ WriteString(s string) (n int, err error)
+
+ // String method returns a string
+ String() string
+
+ // Bytes return a byte slice.
+ Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+ w writer
+ scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+ scratch := make([]byte, 64)
+
+ return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+ return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+ return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding
+// It writes it's start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+ return newValue(e.w, e.scratch, element)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 000000000..f3db6ccca
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+ Code string
+ Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+// read from r.
+//
+// noErrorWrapping selects the expected document shape: when true, Code and
+// Message are decoded directly from the document root; when false they are
+// expected nested inside an <Error> element.
+// An io.EOF from an empty response body is tolerated and yields zero-value
+// components rather than an error.
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping.
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within <Error></Error>.
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
new file mode 100644
index 000000000..be223e09a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
@@ -0,0 +1,121 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+ "unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+// XML character entity replacements, written out once so escaping is a
+// byte-slice copy rather than a per-call format. Numeric forms are used
+// where they are shorter than the named entity.
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+)
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+// TODO: When do we need to escape the string?
+// Based on encoding/xml escapeString from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeString(e writer, s string) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = escQuot
+ case '\'':
+ esc = escApos
+ case '&':
+ esc = escAmp
+ case '<':
+ esc = escLT
+ case '>':
+ esc = escGT
+ case '\t':
+ esc = escTab
+ case '\n':
+ esc = escNL
+ case '\r':
+ esc = escCR
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = escFFFD
+ break
+ }
+ continue
+ }
+ e.WriteString(s[last : i-width])
+ e.Write(esc)
+ last = i
+ }
+ e.WriteString(s[last:])
+}
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+//
+// Based on encoding/xml escapeText from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeText(e writer, s []byte) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRune(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = escQuot
+ case '\'':
+ esc = escApos
+ case '&':
+ esc = escAmp
+ case '<':
+ esc = escLT
+ case '>':
+ esc = escGT
+ case '\t':
+ esc = escTab
+ case '\n':
+ // TODO: This always escapes newline, which is different than stdlib's optional
+ // escape of new line
+ esc = escNL
+ case '\r':
+ esc = escCR
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = escFFFD
+ break
+ }
+ continue
+ }
+ e.Write(s[last : i-width])
+ e.Write(esc)
+ last = i
+ }
+ e.Write(s[last:])
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
new file mode 100644
index 000000000..e42858965
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
@@ -0,0 +1,53 @@
+package xml
+
+// mapEntryWrapper is the default member wrapper start element for XML Map entry
+var mapEntryWrapper = StartElement{
+ Name: Name{Local: "entry"},
+}
+
+// Map represents the encoding of a XML map type
+type Map struct {
+ w writer
+ scratch *[]byte
+
+ // member start element is the map entry wrapper start element
+ memberStartElement StartElement
+
+ // isFlattened returns true if the map is a flattened map
+ isFlattened bool
+}
+
+// newMap returns a map encoder which sets the default map
+// entry wrapper to `entry`.
+//
+// A map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`.
+func newMap(w writer, scratch *[]byte) *Map {
+ return &Map{
+ w: w,
+ scratch: scratch,
+ memberStartElement: mapEntryWrapper,
+ }
+}
+
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in memberWrapper`.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+ return &Map{
+ w: w,
+ scratch: scratch,
+ memberStartElement: memberWrapper,
+ isFlattened: true,
+ }
+}
+
+// Entry returns a Value encoder with map's element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+ v := newValue(m.w, m.scratch, m.memberStartElement)
+ v.isFlattened = m.isFlattened
+ return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 000000000..09434b2c0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math/big"
+ "strconv"
+
+ "github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+ w writer
+ scratch *[]byte
+
+ // xml start element is the associated start element for the Value
+ startElement StartElement
+
+ // indicates if the Value represents a flattened shape
+ isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+ return Value{
+ w: w,
+ scratch: scratch,
+ startElement: startElement,
+ }
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+ writeStartElement(w, startElement)
+ return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+ if el.isZero() {
+ return fmt.Errorf("xml start element cannot be nil")
+ }
+
+ w.WriteRune(leftAngleBracket)
+
+ if len(el.Name.Space) != 0 {
+ escapeString(w, el.Name.Space)
+ w.WriteRune(colon)
+ }
+ escapeString(w, el.Name.Local)
+ for _, attr := range el.Attr {
+ w.WriteRune(' ')
+ writeAttribute(w, &attr)
+ }
+
+ w.WriteRune(rightAngleBracket)
+ return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+ // if local, space both are not empty
+ if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+ escapeString(w, attr.Name.Space)
+ w.WriteRune(colon)
+ }
+
+ // if prefix is empty, the default `xmlns` space should be used as prefix.
+ if len(attr.Name.Local) == 0 {
+ attr.Name.Local = attr.Name.Space
+ }
+
+ escapeString(w, attr.Name.Local)
+ w.WriteRune(equals)
+ w.WriteRune(quote)
+ escapeString(w, attr.Value)
+ w.WriteRune(quote)
+}
+
+// writeEndElement takes in a end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+ if el.isZero() {
+ return fmt.Errorf("xml end element cannot be nil")
+ }
+
+ w.WriteRune(leftAngleBracket)
+ w.WriteRune(forwardSlash)
+
+ if len(el.Name.Space) != 0 {
+ escapeString(w, el.Name.Space)
+ w.WriteRune(colon)
+ }
+ escapeString(w, el.Name.Local)
+ w.WriteRune(rightAngleBracket)
+
+ return nil
+}
+
+// String encodes v as a XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+ escapeString(xv.w, v)
+ xv.Close()
+}
+
+// Byte encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+ xv.Long(int64(v))
+}
+
+// Short encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+ xv.Long(int64(v))
+}
+
+// Integer encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+ xv.Long(int64(v))
+}
+
+// Long encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+ *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+ xv.w.Write(*xv.scratch)
+
+ xv.Close()
+}
+
+// Float encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+ xv.float(float64(v), 32)
+ xv.Close()
+}
+
+// Double encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+ xv.float(v, 64)
+ xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+ *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+ xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as a XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+ *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+ xv.w.Write(*xv.scratch)
+
+ xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+ encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+ xv.Close()
+}
+
+// BigInteger encodes v big.Int as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+ xv.w.Write([]byte(v.Text(10)))
+ xv.Close()
+}
+
+// BigDecimal encodes v big.Float as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ xv.Long(i)
+ return
+ }
+
+ xv.w.Write([]byte(v.Text('e', -1)))
+ xv.Close()
+}
+
+// Write writes v directly to the xml document
+// if escapeXMLText is set to true, write will escape text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+ // escape and write xml text
+ if escapeXMLText {
+ escapeText(xv.w, v)
+ } else {
+ // write xml directly
+ xv.w.Write(v)
+ }
+
+ xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// Member Element method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+ return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by the FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+ v := newFlattenedValue(xv.w, xv.scratch, element)
+ v.isFlattened = true
+ return v
+}
+
+// Array returns an array encoder. By default, the members of array are
+// wrapped with `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+ return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes named start element as an argument, the named start element will be used to wrap xml array entries.
+for eg, `<someList><customName>entry1</customName></someList>`
+Here `customName` named start element will be wrapped on each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+ return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with `<entry>` element tag.
+
+If value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+ // flattened map
+ if xv.isFlattened {
+ return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+ }
+
+ // un-flattened map
+ return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is modified copy of json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+ if v == nil {
+ return
+ }
+
+ encodedLen := base64.StdEncoding.EncodedLen(len(v))
+ if encodedLen <= len(scratch) {
+ // If the encoded bytes fit in e.scratch, avoid an extra
+ // allocation and use the cheaper Encoding.Encode.
+ dst := scratch[:encodedLen]
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else if encodedLen <= 1024 {
+ // The encoded bytes are short enough to allocate for, and
+ // Encoding.Encode is still cheaper.
+ dst := make([]byte, encodedLen)
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else {
+ // The encoded bytes are too long to cheaply allocate, and
+ // Encoding.Encode is no longer noticeably cheaper.
+ enc := base64.NewEncoder(base64.StdEncoding, w)
+ enc.Write(v)
+ enc.Close()
+ }
+}
+
+// IsFlattened returns true if value is for flattened shape.
+func (xv Value) IsFlattened() bool {
+ return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+ writeEndElement(xv.w, xv.startElement.End())
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 000000000..dc4eebdff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "strings"
+)
+
+// NodeDecoder is a XML decoder wrapper that is responsible to decoding
+// a single XML Node element and it's nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+ Decoder *xml.Decoder
+ StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized XMLNodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+ return NodeDecoder{
+ Decoder: decoder,
+ StartEl: startEl,
+ }
+}
+
+// Token on a Node Decoder returns a xml StartElement. It returns a boolean that indicates the
+// a token is the node decoder's end node token; and an error which indicates any error
+// that occurred while retrieving the start element
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+ for {
+ token, e := d.Decoder.Token()
+ if e != nil {
+ return t, done, e
+ }
+
+ // check if we reach end of the node being decoded
+ if el, ok := token.(xml.EndElement); ok {
+ return t, el == d.StartEl.End(), err
+ }
+
+ if t, ok := token.(xml.StartElement); ok {
+ return restoreAttrNamespaces(t), false, err
+ }
+
+ // skip token if it is a comment or preamble or empty space value due to indentation
+ // or if it's a value and is not expected
+ }
+}
+
+// restoreAttrNamespaces update XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+ if len(node.Attr) == 0 {
+ return node
+ }
+
+ // Generate a mapping of XML namespace values to their short names.
+ ns := map[string]string{}
+ for _, a := range node.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ break
+ }
+ }
+
+ for i, a := range node.Attr {
+ if a.Name.Space == "xmlns" {
+ continue
+ }
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had <foo xmlns:bar=baz bar:bin=hi/>
+		// then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+		// continue to resolve as `bar` so we can easily identify it later on.
+ if v, ok := ns[node.Attr[i].Name.Space]; ok {
+ node.Attr[i].Name.Space = v
+ }
+ }
+ return node
+}
+
+// GetElement looks for the given tag name at the current level, and returns the element if found, and
+// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+ for {
+ token, done, err := d.Token()
+ if err != nil {
+ return t, err
+ }
+ if done {
+ return t, fmt.Errorf("%s node not found", name)
+ }
+ switch {
+ case strings.EqualFold(name, token.Name.Local):
+ return token, nil
+ default:
+ err = d.Decoder.Skip()
+ if err != nil {
+ return t, err
+ }
+ }
+ }
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+ t, e := d.Decoder.Token()
+ if e != nil {
+ return c, e
+ }
+
+ endElement := d.StartEl.End()
+
+ switch ev := t.(type) {
+ case xml.CharData:
+ c = ev.Copy()
+ case xml.EndElement: // end tag or self-closing
+ if ev == endElement {
+ return []byte{}, err
+ }
+ return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+ default:
+ return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+ }
+
+ t, e = d.Decoder.Token()
+ if e != nil {
+ return c, e
+ }
+
+ if ev, ok := t.(xml.EndElement); ok {
+ if ev == endElement {
+ return c, err
+ }
+ }
+
+ return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response and ignore the
+// comments and preamble
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+ for {
+ t, e := decoder.Token()
+ if e != nil {
+ return startElement, e
+ }
+
+ if startElement, ok := t.(xml.StartElement); ok {
+ return startElement, err
+ }
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go
new file mode 100644
index 000000000..a397cf6fb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/errors.go
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+ error
+
+ // ErrorCode returns the error code for the API exception.
+ ErrorCode() string
+ // ErrorMessage returns the error message for the API exception.
+ ErrorMessage() string
+ // ErrorFault returns the fault for the API exception.
+ ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+ Code string
+ Message string
+ Fault ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+ return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with names of the operation and API.
+type OperationError struct {
+ ServiceID string
+ OperationName string
+ Err error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err }
+
+func (e *OperationError) Error() string {
+ return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
+}
+
+// DeserializationError provides a wrapper for and error that occurs during
+// deserialization.
+type DeserializationError struct {
+ Err error // original error
+ Snapshot []byte
+}
+
+// Error returns a formatted error for DeserializationError
+func (e *DeserializationError) Error() string {
+ const msg = "deserialization failed"
+ if e.Err == nil {
+ return msg
+ }
+ return fmt.Sprintf("%s, %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in DeserializationError
+func (e *DeserializationError) Unwrap() error { return e.Err }
+
+// ErrorFault provides the type for a Smithy API error fault.
+type ErrorFault int
+
+// ErrorFault enumeration values
+const (
+ FaultUnknown ErrorFault = iota
+ FaultServer
+ FaultClient
+)
+
+func (f ErrorFault) String() string {
+ switch f {
+ case FaultServer:
+ return "server"
+ case FaultClient:
+ return "client"
+ default:
+ return "unknown"
+ }
+}
+
+// SerializationError represents an error that occurred while attempting to serialize a request
+type SerializationError struct {
+ Err error // original error
+}
+
+// Error returns a formatted error for SerializationError
+func (e *SerializationError) Error() string {
+ const msg = "serialization failed"
+ if e.Err == nil {
+ return msg
+ }
+ return fmt.Sprintf("%s: %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in SerializationError
+func (e *SerializationError) Unwrap() error { return e.Err }
+
+// CanceledError is the error that will be returned by an API request that was
+// canceled. API operations given a Context may return this error when
+// canceled.
+type CanceledError struct {
+ Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*CanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *CanceledError) Unwrap() error {
+ return e.Err
+}
+
+func (e *CanceledError) Error() string {
+ return fmt.Sprintf("canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/smithy-go/go.mod b/vendor/github.com/aws/smithy-go/go.mod
new file mode 100644
index 000000000..02a91c4a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/go.mod
@@ -0,0 +1,5 @@
+module github.com/aws/smithy-go
+
+go 1.15
+
+require github.com/google/go-cmp v0.5.4
diff --git a/vendor/github.com/aws/smithy-go/go.sum b/vendor/github.com/aws/smithy-go/go.sum
new file mode 100644
index 000000000..1ffcbdbe2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/go.sum
@@ -0,0 +1,4 @@
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go
new file mode 100644
index 000000000..f8417c15b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/byte.go
@@ -0,0 +1,12 @@
+package io
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go
new file mode 100644
index 000000000..a6a33eaf5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/doc.go
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go
new file mode 100644
index 000000000..07063f296
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/reader.go
@@ -0,0 +1,16 @@
+package io
+
+import (
+ "io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+ io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
new file mode 100644
index 000000000..7537eb242
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
@@ -0,0 +1,89 @@
+package io
+
+import (
+ "bytes"
+ "io"
+)
+
+// RingBuffer struct satisfies io.ReadWrite interface.
+//
+// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+ slice []byte
+ start int
+ end int
+ size int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+ ringBuf := RingBuffer{
+ slice: slice,
+ }
+ return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with an error.
+//
+// Write always accepts all of p: once the buffer is full, the oldest bytes
+// are overwritten. Per the io.Writer contract it therefore reports len(p)
+// bytes written with a nil error; returning the buffer occupancy (r.size)
+// would over-report on any Write into a partially filled buffer and confuse
+// io.Copy-style callers.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, drop the oldest byte by advancing start
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with Error encountered while reading
+func (r *RingBuffer) Read(p []byte) (int, error) {
+ // readCount keeps track of the number of bytes read
+ var readCount int
+ for j := 0; j < len(p); j++ {
+ // if ring buffer is empty or completely read
+ // return EOF error.
+ if r.size == 0 {
+ return readCount, io.EOF
+ }
+
+ p[j] = r.slice[r.start]
+ readCount++
+ // increment the start pointer for ring buffer
+ r.start++
+ // decrement the size of ring buffer
+ r.size--
+
+ if r.start == len(r.slice) {
+ r.start = 0
+ }
+ }
+ return readCount, nil
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+ var b bytes.Buffer
+ io.Copy(&b, &r)
+ return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+ *r = RingBuffer{
+ slice: r.slice,
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 000000000..70b387621
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+ "context"
+ "io"
+ "log"
+)
+
// Classification is the log entry's classification name.
type Classification string

// Set of standard classifications that can be used by clients and middleware
const (
	// Warn classifies entries that highlight potential problems.
	Warn Classification = "WARN"
	// Debug classifies entries useful for diagnostics and development.
	Debug Classification = "DEBUG"
)

// Logger is an interface for logging entries at certain classifications.
type Logger interface {
	// Logf is expected to support the standard fmt package "verbs".
	Logf(classification Classification, format string, v ...interface{})
}

// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
type LoggerFunc func(classification Classification, format string, v ...interface{})

// Logf delegates the logging request to the wrapped function.
func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
	f(classification, format, v...)
}

// ContextLogger is an optional interface a Logger implementation may expose that provides
// the ability to create context aware log entries.
type ContextLogger interface {
	// WithContext returns a Logger bound to the provided context.
	WithContext(context.Context) Logger
}
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+ if logger == nil {
+ return Nop{}
+ }
+
+ cl, ok := logger.(ContextLogger)
+ if !ok {
+ return logger
+ }
+
+ return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+ return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to it's
+// Printf method.
+type StandardLogger struct {
+ Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+ if len(classification) != 0 {
+ format = string(classification) + " " + format
+ }
+
+ s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+ return &StandardLogger{
+ Logger: log.New(writer, "SDK ", log.LstdFlags),
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 000000000..c4bc958c2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provide transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into a
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters that the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error above stacks can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to a step front or back, or relative, by name, to an
+// existing middleware in that stack. If a middleware does not have a name, a
+// unique name will be generated when the middleware is added to the step.
+//
+// // Create middleware stack
+// stack := middleware.NewStack()
+//
+// // Add middleware to stack steps
+// stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+// stack.Serialize.Add(marshalOperationFoo, middleware.After)
+// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+// // Invoke middleware on handler.
+// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go
new file mode 100644
index 000000000..c2f0dbb6b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/logging.go
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context value key for which the logger is associated with.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
+func GetLogger(ctx context.Context) logging.Logger {
+ logger, ok := ctx.Value(loggerKey{}).(logging.Logger)
+ if !ok || logger == nil {
+ return logging.Nop{}
+ }
+
+ return logging.WithContext(ctx, logger)
+}
+
// SetLogger sets the provided logger value on the provided ctx.
func SetLogger(ctx context.Context, logger logging.Logger) context.Context {
	return context.WithValue(ctx, loggerKey{}, logger)
}

// setLogger is an Initialize-step middleware that stores its Logger on the
// middleware context, making it retrievable downstream via GetLogger.
type setLogger struct {
	Logger logging.Logger
}

// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context.
func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error {
	return stack.Initialize.Add(&setLogger{Logger: logger}, After)
}

// ID identifies this middleware within a stack step.
func (a *setLogger) ID() string {
	return "SetLogger"
}

// HandleInitialize attaches the configured logger to the context before
// delegating to the next handler in the Initialize step.
func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
	out InitializeOutput, metadata Metadata, err error,
) {
	return next.HandleInitialize(SetLogger(ctx, a.Logger), in)
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go
new file mode 100644
index 000000000..c41ebf5a4
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go
@@ -0,0 +1,52 @@
+package middleware
+
// MetadataReader provides an interface for reading metadata from the
// underlying metadata container.
type MetadataReader interface {
	// Get retrieves the metadata value stored for key, or nil if absent.
	Get(key interface{}) interface{}
}
+
// Metadata provides storing and reading metadata values. Keys may be any
// comparable value type; Get, Set, and Has panic if the key type is not
// comparable.
//
// Metadata lazily allocates its backing map, so the zero value is ready to
// use. Because of that laziness, Set must be invoked through an addressable
// value or pointer, or the stored key/value pair may be lost.
type Metadata struct {
	values map[interface{}]interface{}
}

// Get returns the value stored for key, or nil when the key has never been
// set.
//
// Panics if the key type is not comparable.
func (m Metadata) Get(key interface{}) interface{} {
	v := m.values[key]
	return v
}

// Set stores value under key, replacing any existing entry at that key.
//
// Set must be called on an addressable value or pointer so the lazily
// allocated map is retained by the caller's Metadata.
//
// Panics if the key type is not comparable.
func (m *Metadata) Set(key, value interface{}) {
	if m.values == nil {
		m.values = make(map[interface{}]interface{})
	}
	m.values[key] = value
}

// Has reports whether a value has been stored for key.
//
// Panics if the key type is not comparable.
func (m Metadata) Has(key interface{}) bool {
	// Lookups on a nil map are safe and yield the zero value, so no nil
	// guard is required here.
	_, ok := m.values[key]
	return ok
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 000000000..803b7c751
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+ "context"
+)
+
// Handler provides the interface for performing the logic to obtain an output,
// or error for the given input.
type Handler interface {
	// Handle performs logic to obtain an output for the given input. Handler
	// should be decorated with middleware to perform input specific behavior.
	Handle(ctx context.Context, input interface{}) (
		output interface{}, metadata Metadata, err error,
	)
}

// HandlerFunc provides a wrapper around a function pointer to be used as a
// middleware handler.
type HandlerFunc func(ctx context.Context, input interface{}) (
	output interface{}, metadata Metadata, err error,
)

// Handle invokes the underlying function, returning the result.
func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
	output interface{}, metadata Metadata, err error,
) {
	return fn(ctx, input)
}

// Middleware provides the interface to call handlers in a chain.
type Middleware interface {
	// ID provides a unique identifier for the middleware.
	ID() string

	// HandleMiddleware performs the middleware's handling of the input,
	// returning the output, or error. The middleware can invoke the next
	// Handler if handling should continue.
	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
		output interface{}, metadata Metadata, err error,
	)
}
+
+// decoratedHandler wraps a middleware in order to to call the next handler in
+// the chain.
+type decoratedHandler struct {
+ // The next handler to be called.
+ Next Handler
+
+ // The current middleware decorating the handler.
+ With Middleware
+}
+
+// Handle implements the Handler interface to handle a operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) (
+ output interface{}, metadata Metadata, err error,
+) {
+ return m.With.HandleMiddleware(ctx, input, m.Next)
+}
+
+// DecorateHandler decorates a handler with a middleware. Wrapping the handler
+// with the middleware.
+func DecorateHandler(h Handler, with ...Middleware) Handler {
+ for i := len(with) - 1; i >= 0; i-- {
+ h = decoratedHandler{
+ Next: h,
+ With: with[i],
+ }
+ }
+
+ return h
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
new file mode 100644
index 000000000..144a7fa9e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
@@ -0,0 +1,268 @@
+package middleware
+
+import "fmt"
+
// RelativePosition provides specifying the relative position of a middleware
// in an ordered group.
type RelativePosition int

// Relative position for middleware in steps.
const (
	After RelativePosition = iota
	Before
)

// ider is the minimal behavior required of items stored in an orderedIDs:
// they must expose a unique string ID.
type ider interface {
	ID() string
}

// orderedIDs provides an ordered collection of items with relative ordering
// by name.
type orderedIDs struct {
	order *relativeOrder
	items map[string]ider
}

// baseOrderedItems is the initial capacity hint for the order list and item map.
const baseOrderedItems = 5

// newOrderedIDs returns an empty orderedIDs ready for use.
func newOrderedIDs() *orderedIDs {
	return &orderedIDs{
		order: newRelativeOrder(),
		items: make(map[string]ider, baseOrderedItems),
	}
}

// Add injects the item to the relative position of the item group. Returns an
// error if the item already exists.
func (g *orderedIDs) Add(m ider, pos RelativePosition) error {
	id := m.ID()
	if len(id) == 0 {
		return fmt.Errorf("empty ID, ID must not be empty")
	}

	// Order is updated first; the map write below happens only once the ID is
	// known to be unique.
	if err := g.order.Add(pos, id); err != nil {
		return err
	}

	g.items[id] = m
	return nil
}

// Insert injects the item relative to an existing item id. Return error if
// the original item does not exist, or the item being added already exists.
func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error {
	if len(m.ID()) == 0 {
		return fmt.Errorf("insert ID must not be empty")
	}
	if len(relativeTo) == 0 {
		return fmt.Errorf("relative to ID must not be empty")
	}

	if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil {
		return err
	}

	g.items[m.ID()] = m
	return nil
}

// Get returns the ider identified by id. If ider is not present, returns false.
func (g *orderedIDs) Get(id string) (ider, bool) {
	v, ok := g.items[id]
	return v, ok
}

// Swap removes the item by id, replacing it with the new item. Returns error
// if the original item doesn't exist.
func (g *orderedIDs) Swap(id string, m ider) (ider, error) {
	if len(id) == 0 {
		return nil, fmt.Errorf("swap from ID must not be empty")
	}

	iderID := m.ID()
	if len(iderID) == 0 {
		return nil, fmt.Errorf("swap to ID must not be empty")
	}

	if err := g.order.Swap(id, iderID); err != nil {
		return nil, err
	}

	removed := g.items[id]

	delete(g.items, id)
	g.items[iderID] = m

	return removed, nil
}

// Remove removes the item by id. Returns error if the item
// doesn't exist.
func (g *orderedIDs) Remove(id string) (ider, error) {
	if len(id) == 0 {
		return nil, fmt.Errorf("remove ID must not be empty")
	}

	if err := g.order.Remove(id); err != nil {
		return nil, err
	}

	removed := g.items[id]
	delete(g.items, id)
	return removed, nil
}

// List returns the item IDs in invocation order. The returned slice is a
// copy, so callers may retain or mutate it safely.
func (g *orderedIDs) List() []string {
	items := g.order.List()
	order := make([]string, len(items))
	copy(order, items)
	return order
}

// Clear removes all entries and slots.
func (g *orderedIDs) Clear() {
	g.order.Clear()
	g.items = map[string]ider{}
}

// GetOrder returns the item in the order it should be invoked in.
func (g *orderedIDs) GetOrder() []interface{} {
	order := g.order.List()
	ordered := make([]interface{}, len(order))
	for i := 0; i < len(order); i++ {
		ordered[i] = g.items[order[i]]
	}

	return ordered
}
+
+// relativeOrder provides ordering of item
+type relativeOrder struct {
+ order []string
+}
+
+func newRelativeOrder() *relativeOrder {
+ return &relativeOrder{
+ order: make([]string, 0, baseOrderedItems),
+ }
+}
+
+// Add inserts a item into the order relative to the position provided.
+func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error {
+ if len(ids) == 0 {
+ return nil
+ }
+
+ for _, id := range ids {
+ if _, ok := s.has(id); ok {
+ return fmt.Errorf("already exists, %v", id)
+ }
+ }
+
+ switch pos {
+ case Before:
+ return s.insert(0, Before, ids...)
+
+ case After:
+ s.order = append(s.order, ids...)
+
+ default:
+ return fmt.Errorf("invalid position, %v", int(pos))
+ }
+
+ return nil
+}
+
+// Insert injects a item before or after the relative item. Returns
+// an error if the relative item does not exist.
+func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error {
+ if len(ids) == 0 {
+ return nil
+ }
+
+ for _, id := range ids {
+ if _, ok := s.has(id); ok {
+ return fmt.Errorf("already exists, %v", id)
+ }
+ }
+
+ i, ok := s.has(relativeTo)
+ if !ok {
+ return fmt.Errorf("not found, %v", relativeTo)
+ }
+
+ return s.insert(i, pos, ids...)
+}
+
+// Swap will replace the item id with the to item. Returns an
+// error if the original item id does not exist. Allows swapping out a
+// item for another item with the same id.
+func (s *relativeOrder) Swap(id, to string) error {
+ i, ok := s.has(id)
+ if !ok {
+ return fmt.Errorf("not found, %v", id)
+ }
+
+ if _, ok = s.has(to); ok && id != to {
+ return fmt.Errorf("already exists, %v", to)
+ }
+
+ s.order[i] = to
+ return nil
+}
+
+func (s *relativeOrder) Remove(id string) error {
+ i, ok := s.has(id)
+ if !ok {
+ return fmt.Errorf("not found, %v", id)
+ }
+
+ s.order = append(s.order[:i], s.order[i+1:]...)
+ return nil
+}
+
+func (s *relativeOrder) List() []string {
+ return s.order
+}
+
+func (s *relativeOrder) Clear() {
+ s.order = s.order[0:0]
+}
+
+func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error {
+ switch pos {
+ case Before:
+ n := len(ids)
+ var src []string
+ if n <= cap(s.order)-len(s.order) {
+ s.order = s.order[:len(s.order)+n]
+ src = s.order
+ } else {
+ src = s.order
+ s.order = make([]string, len(s.order)+n)
+ copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half
+ }
+ copy(s.order[i+n:], src[i:])
+ copy(s.order[i:], ids)
+ case After:
+ if i == len(s.order)-1 || len(s.order) == 0 {
+ s.order = append(s.order, ids...)
+ } else {
+ s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...)
+ }
+
+ default:
+ return fmt.Errorf("invalid position, %v", int(pos))
+ }
+
+ return nil
+}
+
+func (s *relativeOrder) has(id string) (i int, found bool) {
+ for i := 0; i < len(s.order); i++ {
+ if s.order[i] == id {
+ return i, true
+ }
+ }
+ return 0, false
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go
new file mode 100644
index 000000000..ae3d82e61
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack.go
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+ "context"
+ "io"
+ "strings"
+)
+
// Stack provides protocol and transport agnostic set of middleware split into
// distinct steps. Steps have specific transitions between them, that is
// managed by the individual step.
//
// Steps are composed as middleware around the underlying handler in the
// following order:
//
// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
//
// Any middleware within the chain may choose to stop and return an error or
// response. Since the middleware decorate the handler like a call stack, each
// middleware will receive the result of the next middleware in the chain.
// Middleware that does not need to react to an input, or result must forward
// along the input down the chain, or return the result back up the chain.
//
// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler
type Stack struct {
	// Initialize prepares the input, and sets any default parameters as
	// needed, (e.g. idempotency token, and presigned URLs).
	//
	// Takes Input Parameters, and returns result or error.
	//
	// Receives result or error from Serialize step.
	Initialize *InitializeStep

	// Serialize serializes the prepared input into a data structure that can
	// be consumed by the target transport's message, (e.g. REST-JSON
	// serialization)
	//
	// Converts Input Parameters into a Request, and returns the result or error.
	//
	// Receives result or error from Build step.
	Serialize *SerializeStep

	// Build adds additional metadata to the serialized transport message,
	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
	// modifications to the message should be copied to all message attempts.
	//
	// Takes Request, and returns result or error.
	//
	// Receives result or error from Finalize step.
	Build *BuildStep

	// Finalize performs final preparations needed before sending the message.
	// The message should already be complete by this stage, and is only
	// altered to meet the expectations of the recipient, (e.g. Retry and AWS
	// SigV4 request signing)
	//
	// Takes Request, and returns result or error.
	//
	// Receives result or error from Deserialize step.
	Finalize *FinalizeStep

	// Deserialize reacts to the handler's response returned by the recipient
	// of the request message. Deserializes the response into a structured
	// type or error above stacks can react to.
	//
	// Should only forward Request to underlying handler.
	//
	// Takes Request, and returns result or error.
	//
	// Receives raw response, or error from underlying handler.
	Deserialize *DeserializeStep

	// id uniquely identifies this stack; reported by ID and List.
	id string
}
+
// NewStack returns an initialized, empty stack.
func NewStack(id string, newRequestFn func() interface{}) *Stack {
	return &Stack{
		id:          id,
		Initialize:  NewInitializeStep(),
		Serialize:   NewSerializeStep(newRequestFn),
		Build:       NewBuildStep(),
		Finalize:    NewFinalizeStep(),
		Deserialize: NewDeserializeStep(),
	}
}

// ID returns the unique ID for the stack as a middleware.
func (s *Stack) ID() string { return s.id }

// HandleMiddleware invokes the middleware stack decorating the next handler.
// Each step of stack will be invoked in order before calling the next step.
// With the next handler call last.
//
// The input value must be the input parameters of the operation being
// performed.
//
// Will return the result of the operation, or error.
func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
	output interface{}, metadata Metadata, err error,
) {
	// Decoration order makes Initialize the outermost step; Deserialize
	// wraps the provided handler directly.
	h := DecorateHandler(next,
		s.Initialize,
		s.Serialize,
		s.Build,
		s.Finalize,
		s.Deserialize,
	)

	return h.Handle(ctx, input)
}

// List returns a list of all middleware in the stack by step.
func (s *Stack) List() []string {
	var l []string
	l = append(l, s.id)

	l = append(l, s.Initialize.ID())
	l = append(l, s.Initialize.List()...)

	l = append(l, s.Serialize.ID())
	l = append(l, s.Serialize.List()...)

	l = append(l, s.Build.ID())
	l = append(l, s.Build.List()...)

	l = append(l, s.Finalize.ID())
	l = append(l, s.Finalize.List()...)

	l = append(l, s.Deserialize.ID())
	l = append(l, s.Deserialize.List()...)

	return l
}
+
+func (s *Stack) String() string {
+ var b strings.Builder
+
+ w := &indentWriter{w: &b}
+
+ w.WriteLine(s.id)
+ w.Push()
+
+ writeStepItems(w, s.Initialize)
+ writeStepItems(w, s.Serialize)
+ writeStepItems(w, s.Build)
+ writeStepItems(w, s.Finalize)
+ writeStepItems(w, s.Deserialize)
+
+ return b.String()
+}
+
+type stackStepper interface {
+ ID() string
+ List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+ type lister interface {
+ List() []string
+ }
+
+ w.WriteLine(s.ID())
+ w.Push()
+
+ defer w.Pop()
+
+ // ignore stack to prevent circular iterations
+ if _, ok := s.(*Stack); ok {
+ return
+ }
+
+ for _, id := range s.List() {
+ w.WriteLine(id)
+ }
+}
+
+type stringWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+ WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+ w stringWriter
+ depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+ w.depth++
+}
+
+func (w *indentWriter) Pop() {
+ w.depth--
+ if w.depth < 0 {
+ w.depth = 0
+ }
+}
+
+func (w *indentWriter) WriteLine(v string) {
+ w.w.WriteString(indentDepth[:w.depth])
+
+ v = strings.ReplaceAll(v, "\n", "\\n")
+ v = strings.ReplaceAll(v, "\r", "\\r")
+
+ w.w.WriteString(v)
+ w.w.WriteRune('\n')
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
new file mode 100644
index 000000000..21b736a92
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+ "context"
+ "reflect"
+ "strings"
+)
+
// WithStackValue adds a key value pair to the context that are intended to be
// scoped to a stack. Use ClearStackValues to get a new context with all stack
// values cleared.
func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
	// Prepend the new pair onto any existing chain stored on the context.
	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)

	md = withStackValue(md, key, value)
	return context.WithValue(ctx, stackValuesKey{}, md)
}

// ClearStackValues returns a context without any stack values.
func ClearStackValues(ctx context.Context) context.Context {
	return context.WithValue(ctx, stackValuesKey{}, nil)
}

// GetStackValue returns the value pointed to by the key within the stack
// values, if it is present. Returns nil when no stack values are stored or
// the key is absent.
func GetStackValue(ctx context.Context, key interface{}) interface{} {
	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
	if md == nil {
		return nil
	}

	return md.Value(key)
}
+
// stackValuesKey is the context key under which the stack value chain is
// stored.
type stackValuesKey struct{}

// stackValues is an immutable linked list of key/value pairs; each
// withStackValue call prepends a node pointing at its parent.
type stackValues struct {
	key    interface{}
	value  interface{}
	parent *stackValues
}

// withStackValue returns a new stackValues node holding key/value with parent
// as the remainder of the chain. Panics if key is nil or not comparable,
// mirroring context.WithValue semantics.
func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
	if key == nil {
		panic("nil key")
	}
	if !reflect.TypeOf(key).Comparable() {
		panic("key is not comparable")
	}
	return &stackValues{key: key, value: value, parent: parent}
}

// Value walks the chain from newest to oldest, returning the first value
// whose key matches, or nil when the key is not present.
func (m *stackValues) Value(key interface{}) interface{} {
	if key == m.key {
		return m.value
	}

	if m.parent == nil {
		return nil
	}

	return m.parent.Value(key)
}

// String renders the chain for debugging, newest first, e.g.
// "(string: v1) -> (int: v2)".
//
// Bug fix: the loop previously used `cc == nil` as its condition, so it never
// executed and String always returned "}"; it also read c.key instead of
// cc.key. The stray closing '}' (which had no opening brace) is gone.
func (c *stackValues) String() string {
	var str strings.Builder

	for cc := c; cc != nil; cc = cc.parent {
		str.WriteString("(" +
			reflect.TypeOf(cc.key).String() +
			": " +
			stringify(cc.value) +
			")")
		if cc.parent != nil {
			str.WriteString(" -> ")
		}
	}

	return str.String()
}

// stringer matches values that can describe themselves as a string.
type stringer interface {
	String() string
}

// stringify tries a bit to stringify v, without using fmt, since we don't
// want context depending on the unicode tables. Non-string values without a
// String method render as "".
func stringify(v interface{}) string {
	switch s := v.(type) {
	case stringer:
		return s.String()
	case string:
		return s
	}
	return ""
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 000000000..b91e0959e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+ "context"
+)
+
// BuildInput provides the input parameters for the BuildMiddleware to consume.
// BuildMiddleware may modify the Request value before forwarding the input
// along to the next BuildHandler.
type BuildInput struct {
	Request interface{}
}

// BuildOutput provides the result returned by the next BuildHandler.
type BuildOutput struct {
	Result interface{}
}

// BuildHandler provides the interface for the next handler the
// BuildMiddleware will call in the middleware chain.
type BuildHandler interface {
	// HandleBuild handles the input, producing a result or error.
	HandleBuild(ctx context.Context, in BuildInput) (
		out BuildOutput, metadata Metadata, err error,
	)
}

// BuildMiddleware provides the interface for middleware specific to the
// serialize step. Delegates to the next BuildHandler for further
// processing.
type BuildMiddleware interface {
	// ID returns a unique ID for the middleware in the BuildStep. The step
	// does not allow duplicate IDs.
	ID() string

	// HandleBuild invokes the middleware behavior which must delegate to the
	// next handler for the middleware chain to continue. The method must
	// return a result or error to its caller.
	HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
		out BuildOutput, metadata Metadata, err error,
	)
}

// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided,
// and the func to be invoked.
func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware {
	return buildMiddlewareFunc{
		id: id,
		fn: fn,
	}
}

// buildMiddlewareFunc adapts an ID and function pair to the BuildMiddleware
// interface.
type buildMiddlewareFunc struct {
	// Unique ID for the middleware.
	id string

	// Middleware function to be called.
	fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)
}

// ID returns the unique ID for the middleware.
func (s buildMiddlewareFunc) ID() string { return s.id }

// HandleBuild invokes the middleware Fn.
func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
	out BuildOutput, metadata Metadata, err error,
) {
	return s.fn(ctx, in, next)
}

// Compile-time interface-satisfaction check.
var _ BuildMiddleware = (buildMiddlewareFunc{})
+
// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on
// a handler.
type BuildStep struct {
	ids *orderedIDs
}

// NewBuildStep returns a BuildStep ready to have middleware for
// initialization added to it.
func NewBuildStep() *BuildStep {
	return &BuildStep{
		ids: newOrderedIDs(),
	}
}

// Compile-time interface-satisfaction check.
var _ Middleware = (*BuildStep)(nil)

// ID returns the unique name of the step as a middleware.
func (s *BuildStep) ID() string {
	return "Build stack step"
}

// HandleMiddleware invokes the middleware by decorating the next handler
// provided. Returns the result of the middleware and handler being invoked.
//
// Implements Middleware interface.
func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
	out interface{}, metadata Metadata, err error,
) {
	order := s.ids.GetOrder()

	// Wrap from the innermost middleware outward so order[0] runs first.
	var h BuildHandler = buildWrapHandler{Next: next}
	for i := len(order) - 1; i >= 0; i-- {
		h = decoratedBuildHandler{
			Next: h,
			With: order[i].(BuildMiddleware),
		}
	}

	sIn := BuildInput{
		Request: in,
	}

	res, metadata, err := h.HandleBuild(ctx, sIn)
	return res.Result, metadata, err
}

// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
func (s *BuildStep) Get(id string) (BuildMiddleware, bool) {
	get, ok := s.ids.Get(id)
	if !ok {
		return nil, false
	}
	return get.(BuildMiddleware), ok
}

// Add injects the middleware to the relative position of the middleware group.
// Returns an error if the middleware already exists.
func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error {
	return s.ids.Add(m, pos)
}

// Insert injects the middleware relative to an existing middleware id.
// Return error if the original middleware does not exist, or the middleware
// being added already exists.
func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error {
	return s.ids.Insert(m, relativeTo, pos)
}

// Swap removes the middleware by id, replacing it with the new middleware.
// Returns the middleware removed, or error if the middleware to be removed
// doesn't exist.
func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) {
	removed, err := s.ids.Swap(id, m)
	if err != nil {
		return nil, err
	}

	return removed.(BuildMiddleware), nil
}

// Remove removes the middleware by id. Returns error if the middleware
// doesn't exist.
func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
	removed, err := s.ids.Remove(id)
	if err != nil {
		return nil, err
	}

	return removed.(BuildMiddleware), nil
}

// List returns a list of the middleware in the step.
func (s *BuildStep) List() []string {
	return s.ids.List()
}

// Clear removes all middleware in the step.
func (s *BuildStep) Clear() {
	s.ids.Clear()
}

// buildWrapHandler adapts the generic Handler at the bottom of the chain to
// the BuildHandler interface.
type buildWrapHandler struct {
	Next Handler
}

var _ BuildHandler = (*buildWrapHandler)(nil)

// HandleBuild implements BuildHandler, converts types and delegates to the
// underlying generic handler.
func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
	out BuildOutput, metadata Metadata, err error,
) {
	res, metadata, err := w.Next.Handle(ctx, in.Request)
	return BuildOutput{
		Result: res,
	}, metadata, err
}

// decoratedBuildHandler pairs one BuildMiddleware with the next BuildHandler
// in the chain.
type decoratedBuildHandler struct {
	Next BuildHandler
	With BuildMiddleware
}

var _ BuildHandler = (*decoratedBuildHandler)(nil)

// HandleBuild invokes the decorating middleware, providing it the next
// handler in the chain.
func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
	out BuildOutput, metadata Metadata, err error,
) {
	return h.With.HandleBuild(ctx, in, h.Next)
}

// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)

// HandleBuild invokes the wrapped function with the provided arguments.
func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
	return b(ctx, in)
}

var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 000000000..0d46e6b1b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+ "context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeInput to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+ Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+ RawResponse interface{}
+ Result interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+ HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+ )
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+ // Unique ID for the middleware in the DeserializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // Invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+ out DeserializeOutput, metadata Metadata, err error,
+ )
+}
+
+// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware {
+ return deserializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type deserializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, DeserializeInput, DeserializeHandler) (
+ DeserializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s deserializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleDeserialize invokes the middleware Fn.
+func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ DeserializeMiddleware = (deserializeMiddlewareFunc{})
+
+// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be
+// invoked on a handler.
+type DeserializeStep struct {
+ ids *orderedIDs
+}
+
+// NewDeserializeStep returns a DeserializeStep ready to have middleware for
+// initialization added to it.
+func NewDeserializeStep() *DeserializeStep {
+ return &DeserializeStep{
+ ids: newOrderedIDs(),
+ }
+}
+
+var _ Middleware = (*DeserializeStep)(nil)
+
+// ID returns the unique id of the step as a middleware.
+func (s *DeserializeStep) ID() string {
+ return "Deserialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h DeserializeHandler = deserializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedDeserializeHandler{
+ Next: h,
+ With: order[i].(DeserializeMiddleware),
+ }
+ }
+
+ sIn := DeserializeInput{
+ Request: in,
+ }
+
+ res, metadata, err := h.HandleDeserialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(DeserializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Return error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(DeserializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+ Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// Implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ resp, metadata, err := w.Next.Handle(ctx, in.Request)
+ return DeserializeOutput{
+ RawResponse: resp,
+ }, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+ Next DeserializeHandler
+ With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+ return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 000000000..88e79ded7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+ Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+ Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+ HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+ )
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+ // Unique ID for the middleware in the FinalizeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // Invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+ out FinalizeOutput, metadata Metadata, err error,
+ )
+}
+
+// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware {
+ return finalizeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type finalizeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, FinalizeInput, FinalizeHandler) (
+ FinalizeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s finalizeMiddlewareFunc) ID() string { return s.id }
+
+// HandleFinalize invokes the middleware Fn.
+func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ FinalizeMiddleware = (finalizeMiddlewareFunc{})
+
+// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be
+// invoked on a handler.
+type FinalizeStep struct {
+ ids *orderedIDs
+}
+
+// NewFinalizeStep returns a FinalizeStep ready to have middleware for
+// initialization added to it.
+func NewFinalizeStep() *FinalizeStep {
+ return &FinalizeStep{
+ ids: newOrderedIDs(),
+ }
+}
+
+var _ Middleware = (*FinalizeStep)(nil)
+
+// ID returns the unique id of the step as a middleware.
+func (s *FinalizeStep) ID() string {
+ return "Finalize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h FinalizeHandler = finalizeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedFinalizeHandler{
+ Next: h,
+ With: order[i].(FinalizeMiddleware),
+ }
+ }
+
+ sIn := FinalizeInput{
+ Request: in,
+ }
+
+ res, metadata, err := h.HandleFinalize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(FinalizeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Return error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(FinalizeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(FinalizeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *FinalizeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *FinalizeStep) Clear() {
+ s.ids.Clear()
+}
+
+type finalizeWrapHandler struct {
+ Next Handler
+}
+
+var _ FinalizeHandler = (*finalizeWrapHandler)(nil)
+
+// Implements FinalizeHandler, converts types and delegates to underlying
+// generic handler.
+func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Request)
+ return FinalizeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedFinalizeHandler struct {
+ Next FinalizeHandler
+ With FinalizeMiddleware
+}
+
+var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil)
+
+func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleFinalize(ctx, in, h.Next)
+}
+
+// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler.
+type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error)
+
+// HandleFinalize invokes the wrapped function with the given arguments.
+func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) {
+ return f(ctx, in)
+}
+
+var _ FinalizeHandler = FinalizeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
new file mode 100644
index 000000000..3980bce63
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// InitializeInput wraps the input parameters for the InitializeMiddlewares to
+// consume. InitializeMiddleware may modify the parameter value before
+// forwarding it along to the next InitializeHandler.
+type InitializeInput struct {
+ Parameters interface{}
+}
+
+// InitializeOutput provides the result returned by the next InitializeHandler.
+type InitializeOutput struct {
+ Result interface{}
+}
+
+// InitializeHandler provides the interface for the next handler the
+// InitializeMiddleware will call in the middleware chain.
+type InitializeHandler interface {
+ HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+ )
+}
+
+// InitializeMiddleware provides the interface for middleware specific to the
+// initialize step. Delegates to the next InitializeHandler for further
+// processing.
+type InitializeMiddleware interface {
+ // Unique ID for the middleware in the InitializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // Invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+ out InitializeOutput, metadata Metadata, err error,
+ )
+}
+
+// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided,
+// and the func to be invoked.
+func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware {
+ return initializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type initializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, InitializeInput, InitializeHandler) (
+ InitializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s initializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleInitialize invokes the middleware Fn.
+func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ InitializeMiddleware = (initializeMiddlewareFunc{})
+
+// InitializeStep provides the ordered grouping of InitializeMiddleware to be
+// invoked on a handler.
+type InitializeStep struct {
+ ids *orderedIDs
+}
+
+// NewInitializeStep returns an InitializeStep ready to have middleware for
+// initialization added to it.
+func NewInitializeStep() *InitializeStep {
+ return &InitializeStep{
+ ids: newOrderedIDs(),
+ }
+}
+
+var _ Middleware = (*InitializeStep)(nil)
+
+// ID returns the unique id of the step as a middleware.
+func (s *InitializeStep) ID() string {
+ return "Initialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h InitializeHandler = initializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedInitializeHandler{
+ Next: h,
+ With: order[i].(InitializeMiddleware),
+ }
+ }
+
+ sIn := InitializeInput{
+ Parameters: in,
+ }
+
+ res, metadata, err := h.HandleInitialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(InitializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Return error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(InitializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(InitializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *InitializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *InitializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type initializeWrapHandler struct {
+ Next Handler
+}
+
+var _ InitializeHandler = (*initializeWrapHandler)(nil)
+
+// Implements InitializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Parameters)
+ return InitializeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedInitializeHandler struct {
+ Next InitializeHandler
+ With InitializeMiddleware
+}
+
+var _ InitializeHandler = (*decoratedInitializeHandler)(nil)
+
+func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleInitialize(ctx, in, h.Next)
+}
+
+// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler.
+type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error)
+
+// HandleInitialize calls the wrapped function with the provided arguments.
+func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) {
+ return i(ctx, in)
+}
+
+var _ InitializeHandler = InitializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
new file mode 100644
index 000000000..1e25bbcaa
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
@@ -0,0 +1,219 @@
+package middleware
+
+import "context"
+
+// SerializeInput provides the input parameters for the SerializeMiddleware to
+// consume. SerializeMiddleware may modify the Request value before forwarding
+// SerializeInput along to the next SerializeHandler. The Parameters member
+// should not be modified by SerializeMiddleware, InitializeMiddleware should
+// be responsible for modifying the provided Parameter value.
+type SerializeInput struct {
+ Parameters interface{}
+ Request interface{}
+}
+
+// SerializeOutput provides the result returned by the next SerializeHandler.
+type SerializeOutput struct {
+ Result interface{}
+}
+
+// SerializeHandler provides the interface for the next handler the
+// SerializeMiddleware will call in the middleware chain.
+type SerializeHandler interface {
+ HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+ )
+}
+
+// SerializeMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next SerializeHandler for further
+// processing.
+type SerializeMiddleware interface {
+ // Unique ID for the middleware in the SerializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // Invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+ out SerializeOutput, metadata Metadata, err error,
+ )
+}
+
+// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware {
+ return serializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type serializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, SerializeInput, SerializeHandler) (
+ SerializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s serializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleSerialize invokes the middleware Fn.
+func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ SerializeMiddleware = (serializeMiddlewareFunc{})
+
+// SerializeStep provides the ordered grouping of SerializeMiddleware to be
+// invoked on a handler.
+type SerializeStep struct {
+ newRequest func() interface{}
+ ids *orderedIDs
+}
+
+// NewSerializeStep returns a SerializeStep ready to have middleware for
+// initialization added to it. The newRequest func parameter is used to
+// initialize the transport specific request for the stack SerializeStep to
+// serialize the input parameters into.
+func NewSerializeStep(newRequest func() interface{}) *SerializeStep {
+ return &SerializeStep{
+ ids: newOrderedIDs(),
+ newRequest: newRequest,
+ }
+}
+
+var _ Middleware = (*SerializeStep)(nil)
+
+// ID returns the unique id of the step as a middleware.
+func (s *SerializeStep) ID() string {
+ return "Serialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h SerializeHandler = serializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedSerializeHandler{
+ Next: h,
+ With: order[i].(SerializeMiddleware),
+ }
+ }
+
+ sIn := SerializeInput{
+ Parameters: in,
+ Request: s.newRequest(),
+ }
+
+ res, metadata, err := h.HandleSerialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(SerializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Return error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(SerializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(SerializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *SerializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *SerializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type serializeWrapHandler struct {
+ Next Handler
+}
+
+var _ SerializeHandler = (*serializeWrapHandler)(nil)
+
+// Implements SerializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Request)
+ return SerializeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedSerializeHandler struct {
+ Next SerializeHandler
+ With SerializeMiddleware
+}
+
+var _ SerializeHandler = (*decoratedSerializeHandler)(nil)
+
+func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleSerialize(ctx, in, h.Next)
+}
+
+// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler.
+type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error)
+
+// HandleSerialize calls the wrapped function with the provided arguments.
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) {
+ return s(ctx, in)
+}
+
+var _ SerializeHandler = SerializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go
new file mode 100644
index 000000000..bc1f69961
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/doc.go
@@ -0,0 +1,5 @@
+// Package ptr provides utilities for converting scalar literal type values to and from pointers inline.
+package ptr
+
+//go:generate go run -tags codegen generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go
new file mode 100644
index 000000000..5d5a9b3b0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go
@@ -0,0 +1,566 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+ "time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+ ps := make([]bool, len(vs))
+ for i, v := range vs {
+ ps[i] = ToBool(v)
+ }
+
+ return ps
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+ ps := make(map[string]bool, len(vs))
+ for k, v := range vs {
+ ps[k] = ToBool(v)
+ }
+
+ return ps
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+ ps := make([]byte, len(vs))
+ for i, v := range vs {
+ ps[i] = ToByte(v)
+ }
+
+ return ps
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+ ps := make(map[string]byte, len(vs))
+ for k, v := range vs {
+ ps[k] = ToByte(v)
+ }
+
+ return ps
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+ ps := make([]string, len(vs))
+ for i, v := range vs {
+ ps[i] = ToString(v)
+ }
+
+ return ps
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+ ps := make(map[string]string, len(vs))
+ for k, v := range vs {
+ ps[k] = ToString(v)
+ }
+
+ return ps
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns a int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+ ps := make([]int, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt(v)
+ }
+
+ return ps
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+ ps := make(map[string]int, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt(v)
+ }
+
+ return ps
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns a int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+ ps := make([]int8, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt8(v)
+ }
+
+ return ps
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+ ps := make(map[string]int8, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt8(v)
+ }
+
+ return ps
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns a int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+ ps := make([]int16, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt16(v)
+ }
+
+ return ps
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+ ps := make(map[string]int16, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt16(v)
+ }
+
+ return ps
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns a int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+ ps := make([]int32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt32(v)
+ }
+
+ return ps
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+ ps := make(map[string]int32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt32(v)
+ }
+
+ return ps
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns a int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+ ps := make([]int64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt64(v)
+ }
+
+ return ps
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+ ps := make(map[string]int64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt64(v)
+ }
+
+ return ps
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+ ps := make([]uint, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint(v)
+ }
+
+ return ps
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+ ps := make(map[string]uint, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint(v)
+ }
+
+ return ps
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+ ps := make([]uint8, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint8(v)
+ }
+
+ return ps
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+ ps := make(map[string]uint8, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint8(v)
+ }
+
+ return ps
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+ ps := make([]uint16, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint16(v)
+ }
+
+ return ps
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+ ps := make(map[string]uint16, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint16(v)
+ }
+
+ return ps
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+ ps := make([]uint32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint32(v)
+ }
+
+ return ps
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+ ps := make(map[string]uint32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint32(v)
+ }
+
+ return ps
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+ ps := make([]uint64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint64(v)
+ }
+
+ return ps
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+ ps := make(map[string]uint64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint64(v)
+ }
+
+ return ps
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+ ps := make([]float32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToFloat32(v)
+ }
+
+ return ps
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+ ps := make(map[string]float32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToFloat32(v)
+ }
+
+ return ps
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+ ps := make([]float64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToFloat64(v)
+ }
+
+ return ps
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+ ps := make(map[string]float64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToFloat64(v)
+ }
+
+ return ps
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToTimeSlice returns a slice of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Time
+// zero value if the pointer was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+ ps := make([]time.Time, len(vs))
+ for i, v := range vs {
+ ps[i] = ToTime(v)
+ }
+
+ return ps
+}
+
+// ToTimeMap returns a map of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. The time.Time
+// zero value is used if the pointer was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+ ps := make(map[string]time.Time, len(vs))
+ for k, v := range vs {
+ ps[k] = ToTime(v)
+ }
+
+ return ps
+}
diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go
new file mode 100644
index 000000000..56c85fb3a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go
@@ -0,0 +1,81 @@
+// +build codegen
+
+package ptr
+
+import "strings"
+
+func GetScalars() Scalars {
+ return Scalars{
+ {Type: "bool"},
+ {Type: "byte"},
+ {Type: "string"},
+ {Type: "int"},
+ {Type: "int8"},
+ {Type: "int16"},
+ {Type: "int32"},
+ {Type: "int64"},
+ {Type: "uint"},
+ {Type: "uint8"},
+ {Type: "uint16"},
+ {Type: "uint32"},
+ {Type: "uint64"},
+ {Type: "float32"},
+ {Type: "float64"},
+ {Type: "Time", Import: &Import{Path: "time"}},
+ }
+}
+
+// Import provides the import path and optional alias
+type Import struct {
+ Path string
+ Alias string
+}
+
+// Package returns the Go package name for the import. Returns alias if set.
+func (i Import) Package() string {
+ if v := i.Alias; len(v) != 0 {
+ return v
+ }
+
+ if v := i.Path; len(v) != 0 {
+ parts := strings.Split(v, "/")
+ pkg := parts[len(parts)-1]
+ return pkg
+ }
+
+ return ""
+}
+
+// Scalar provides the definition of a type to generate pointer utilities for.
+type Scalar struct {
+ Type string
+ Import *Import
+}
+
+// Name returns the exported function name for the type.
+func (t Scalar) Name() string {
+ return strings.Title(t.Type)
+}
+
+// Symbol returns the scalar's Go symbol with path if needed.
+func (t Scalar) Symbol() string {
+ if t.Import != nil {
+ return t.Import.Package() + "." + t.Type
+ }
+ return t.Type
+}
+
+// Scalars is a list of scalars.
+type Scalars []Scalar
+
+// Imports returns all imports for the scalars.
+func (ts Scalars) Imports() []*Import {
+ imports := []*Import{}
+ for _, t := range ts {
+ if v := t.Import; v != nil {
+ imports = append(imports, v)
+ }
+ }
+
+ return imports
+}
diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go
new file mode 100644
index 000000000..a57d13015
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go
@@ -0,0 +1,470 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+ "time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+ ps := make([]*bool, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+ ps := make(map[string]*bool, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+ return &v
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+ ps := make([]*byte, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+ ps := make(map[string]*byte, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+ ps := make([]*string, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+ ps := make(map[string]*string, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+ ps := make([]*int, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+ ps := make(map[string]*int, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+ return &v
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+ ps := make([]*int8, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+ ps := make(map[string]*int8, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+ return &v
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+ ps := make([]*int16, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+ ps := make(map[string]*int16, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+ ps := make([]*int32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+ ps := make(map[string]*int32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+ ps := make([]*int64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+ ps := make(map[string]*int64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+ ps := make([]*uint, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+ ps := make(map[string]*uint, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return &v
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+ ps := make([]*uint8, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+ ps := make(map[string]*uint8, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return &v
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+ ps := make([]*uint16, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+ ps := make(map[string]*uint16, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+ ps := make([]*uint32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+ ps := make(map[string]*uint32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+ ps := make([]*uint64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+ ps := make(map[string]*uint64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+ ps := make([]*float32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+ ps := make(map[string]*float32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+ ps := make([]*float64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+ ps := make(map[string]*float64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+ ps := make([]*time.Time, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ ps := make(map[string]*time.Time, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go
new file mode 100644
index 000000000..f8b25d562
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/doc.go
@@ -0,0 +1,3 @@
+// Package rand provides utilities for creating and working with random value
+// generators.
+package rand
diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go
new file mode 100644
index 000000000..4dc176d7d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/rand.go
@@ -0,0 +1,31 @@
+package rand
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+func init() {
+ Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+ bi, err := rand.Int(reader, big.NewInt(max))
+ if err != nil {
+ return 0, fmt.Errorf("failed to read random value, %w", err)
+ }
+
+ return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and the value of max,
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+ return Int63n(Reader, max)
+}
diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go
new file mode 100644
index 000000000..962b94665
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/uuid.go
@@ -0,0 +1,72 @@
+package rand
+
+import (
+ "encoding/hex"
+ "io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+ uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider returning
+// tokens in the UUID random format using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+ return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for Idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+ return u.uuid.GetUUID()
+}
+
+// UUID provides computing random UUID version 4 values from a random source
+// reader.
+type UUID struct {
+ randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID values.
+func NewUUID(r io.Reader) *UUID {
+ return &UUID{randSrc: r}
+}
+
+// GetUUID returns a UUID random string sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+ var b [16]byte
+ if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+ return "", err
+ }
+
+ return uuidVersion4(b), nil
+}
+
+// uuidVersion4 returns a random UUID version 4 from the byte slice provided.
+func uuidVersion4(u [16]byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+ // 13th character is "4"
+ u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] & 0x3f) | 0x80 // Variant is 10
+
+ var scratch [36]byte
+
+ hex.Encode(scratch[:8], u[0:4])
+ scratch[8] = dash
+ hex.Encode(scratch[9:13], u[4:6])
+ scratch[13] = dash
+ hex.Encode(scratch[14:18], u[6:8])
+ scratch[18] = dash
+ hex.Encode(scratch[19:23], u[8:10])
+ scratch[23] = dash
+ hex.Encode(scratch[24:], u[10:])
+
+ return string(scratch[:])
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 000000000..54f9f0fef
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,80 @@
+package time
+
+import (
+ "context"
+ "math/big"
+ "time"
+)
+
+const (
+ // dateTimeFormat is a date-time defined by RFC3339 section 5.6 with no UTC offset.
+ dateTimeFormat = "2006-01-02T15:04:05.99Z"
+
+ // httpDateFormat is an IMF-fixdate formatted time, https://tools.ietf.org/html/rfc7231.html#section-7.1.1.1
+ httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime format value as a date-time (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+ return value.Format(dateTimeFormat)
+}
+
+// ParseDateTime parse a string as a date-time
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+ return time.Parse(dateTimeFormat, value)
+}
+
+// FormatHTTPDate format value as a http-date (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+ return value.Format(httpDateFormat)
+}
+
+// ParseHTTPDate parse a string as a http-date
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+ return time.Parse(httpDateFormat, value)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+ ms := value.UnixNano() / int64(time.Millisecond)
+ return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+ f := big.NewFloat(value)
+ f = f.Mul(f, millisecondFloat)
+ i, _ := f.Int64()
+ return time.Unix(0, i*1e6).UTC()
+}
+
+// SleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Which ever happens first. If the context is canceled the
+// Context's error will be returned.
+func SleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
new file mode 100644
index 000000000..7b1cd5675
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
@@ -0,0 +1,65 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// contentMD5Checksum provides a middleware to compute and set
+// content-md5 checksum for an HTTP request
+type contentMD5Checksum struct {
+}
+
+// AddContentChecksumMiddleware adds checksum middleware to middleware's
+// build step.
+func AddContentChecksumMiddleware(stack *middleware.Stack) error {
+ // This middleware must be executed before request body is set.
+ return stack.Build.Add(&contentMD5Checksum{}, middleware.Before)
+}
+
+// ID the identifier for the checksum middleware
+func (m *contentMD5Checksum) ID() string { return "ContentChecksum" }
+
+// HandleBuild adds behavior to compute md5 checksum and add content-md5 header
+// on http request
+func (m *contentMD5Checksum) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req) // NOTE(review): %T of req always prints *http.Request here; in.Request would report the actual offending type
+ }
+
+ // if Content-MD5 header is already present, return
+ if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+ return next.HandleBuild(ctx, in)
+ }
+
+ // fetch the request stream.
+ stream := req.GetStream()
+ // compute checksum if payload is explicit
+ if stream != nil {
+ v, err := computeMD5Checksum(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err)
+ }
+
+ // reset the request stream
+ if err := req.RewindStream(); err != nil {
+ return out, metadata, fmt.Errorf(
+ "error rewinding request stream after computing md5 checksum, %w", err)
+ }
+
+ // set the 'Content-MD5' header
+ req.Header.Set(contentMD5Header, string(v))
+ }
+
+ // header (if a stream was present) has been set; continue the build step
+ return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go
new file mode 100644
index 000000000..55b5a95b7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/client.go
@@ -0,0 +1,120 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ClientDo provides the interface for custom HTTP client implementations.
+type ClientDo interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// ClientDoFunc provides a helper to wrap a function as an HTTP client for
+// round tripping requests.
+type ClientDoFunc func(*http.Request) (*http.Response, error)
+
+// Do will invoke the underlying func, returning the result.
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+ return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. Standard
+// implementation is http.Client.
+type ClientHandler struct {
+ client ClientDo
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+func NewClientHandler(client ClientDo) ClientHandler {
+ return ClientHandler{
+ client: client,
+ }
+}
+
+// Handle implements the middleware Handler interface, that will invoke the
+// underlying HTTP client. Requires the input to be a Smithy *Request. Returns
+// a smithy *Response, or error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+ out interface{}, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.(*Request)
+ if !ok {
+ return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+ }
+
+ builtRequest := req.Build(ctx)
+ if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+ return nil, metadata, err
+ }
+
+ resp, err := c.client.Do(builtRequest)
+ if resp == nil {
+ // Ensure a http response value is always present to prevent unexpected
+ // panics.
+ resp = &http.Response{
+ Header: http.Header{},
+ Body: http.NoBody,
+ }
+ }
+ if err != nil {
+ err = &RequestSendError{Err: err}
+
+ // Override the error with a context canceled error, if the context was canceled.
+ select {
+ case <-ctx.Done():
+ err = &smithy.CanceledError{Err: ctx.Err()}
+ default:
+ }
+ }
+
+ // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner.
+ // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the
+ // stream reference so that it can be safely reused.
+ if builtRequest.Body != nil {
+ _ = builtRequest.Body.Close()
+ }
+
+ return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error. This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+ Err error
+}
+
+// ConnectionError return that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+ return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+ return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+ return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: http.NoBody,
+ }, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 000000000..07366ac85
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
+*/
+package http
diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
new file mode 100644
index 000000000..e232211b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
@@ -0,0 +1,90 @@
+package http
+
+import (
+ "fmt"
+ "strings"
+)
+
+func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) {
+ for i := 0; i < len(vs); i++ {
+ if len(vs[i]) == 0 {
+ continue
+ }
+
+ parts, err := splitFn(vs[i])
+ if err != nil {
+ return nil, err
+ }
+ if len(parts) < 2 {
+ continue
+ }
+
+ tmp := make([]string, len(vs)+len(parts)-1) // expanded result: the split parts replace vs[i] in place
+ copy(tmp, vs[:i])
+
+ for j, p := range parts {
+ tmp[i+j] = strings.TrimSpace(p)
+ }
+
+ copy(tmp[i+len(parts):], vs[i+1:])
+
+ vs = tmp
+ i += len(parts) - 1 // skip over the elements just inserted
+ }
+
+ return vs, nil
+}
+
+// SplitHeaderListValues attempts to split the elements of the slice by commas,
+// and return a list of all values separated. Returns error if unable to
+// separate the values.
+func SplitHeaderListValues(vs []string) ([]string, error) {
+ return splitHeaderListValues(vs, commaSplit)
+}
+
+func commaSplit(v string) ([]string, error) {
+ return strings.Split(v, ","), nil
+}
+
+// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date
+// timestamp values in the slice by commas, and return a list of all values
+// separated. The split is aware of HTTP-Date timestamp format, and will skip
+// comma within the timestamp value. Returns an error if unable to split the
+// timestamp values.
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+ return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+ if n := strings.Count(v, ","); n == 1 {
+ // A single comma means a lone HTTP-Date value (day-of-week separator); nothing to split.
+ return nil, nil
+ } else if n == 0 || n%2 == 0 {
+ return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+ }
+
+ var parts []string
+ var i, j int
+
+ var doSplit bool
+ for ; i < len(v); i++ {
+ if v[i] == ',' {
+ if doSplit {
+ doSplit = false
+ parts = append(parts, v[j:i])
+ j = i + 1
+ } else {
+ // Skip the first comma in the timestamp value since that
+ // separates the day from the rest of the timestamp.
+ //
+ // Tue, 17 Dec 2019 23:48:18 GMT
+ doSplit = true
+ }
+ }
+ }
+ if j < len(v) {
+ parts = append(parts, v[j:])
+ }
+
+ return parts, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
new file mode 100644
index 000000000..b8c95d538
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -0,0 +1,53 @@
+package http
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(host string) error {
+ var errors strings.Builder
+ labels := strings.Split(host, ".")
+
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+ errors.WriteString(label)
+ }
+ }
+
+ if len(host) > 255 { // DNS limits a full domain name to 255 octets
+ errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(host)))
+ }
+
+ if len(errors.String()) > 0 {
+ return fmt.Errorf("invalid endpoint host%s", errors.String())
+ }
+ return nil
+}
+
+// ValidHostLabel returns if the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 { // DNS labels are 1-63 octets
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 000000000..941a8d6b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+ "io"
+ "sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+ sr := &safeReadCloser{
+ readCloser: readCloser,
+ }
+
+ if _, ok := readCloser.(io.WriterTo); ok {
+ return &safeWriteToReadCloser{safeReadCloser: sr}
+ }
+
+ return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+ *safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+ r.safeReadCloser.mtx.Lock()
+ defer r.safeReadCloser.mtx.Unlock()
+
+ if r.safeReadCloser.closed {
+ return 0, io.EOF
+ }
+
+ return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps a io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+ readCloser io.ReadCloser
+ closed bool
+ mtx sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ if r.closed {
+ return 0, io.EOF
+ }
+
+ return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ if r.closed {
+ return nil
+ }
+
+ r.closed = true
+ rc := r.readCloser
+ r.readCloser = nil
+ return rc.Close()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
new file mode 100644
index 000000000..8e4acb898
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
@@ -0,0 +1,25 @@
+package http
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+ "io"
+)
+
+// computeMD5Checksum computes base64 md5 checksum of an io.Reader contents.
+// Returns the byte slice of md5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+ h := md5.New() // MD5 here is a transport integrity checksum (Content-MD5), not a security primitive
+ // copy errors may be assumed to be from the body.
+ _, err := io.Copy(h, r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read body: %w", err)
+ }
+
+ // encode the md5 checksum in base64.
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ return sum64, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
new file mode 100644
index 000000000..866088572
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
@@ -0,0 +1,69 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically
+// close the response body of an operation request if the request response
+// failed.
+func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before)
+}
+
+type errorCloseResponseBodyMiddleware struct{}
+
+func (*errorCloseResponseBodyMiddleware) ID() string {
+ return "ErrorCloseResponseBody"
+}
+
+func (m *errorCloseResponseBodyMiddleware) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err := next.HandleDeserialize(ctx, input)
+ if err != nil {
+ if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil {
+ // Do not validate that the response closes successfully.
+ resp.Body.Close()
+ }
+ }
+
+ return out, metadata, err
+}
+
+// AddCloseResponseBodyMiddleware adds the middleware to automatically close
+// the response body of an operation request, after the response had been
+// deserialized.
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+ return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err := next.HandleDeserialize(ctx, input)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ if resp, ok := out.RawResponse.(*Response); ok { // NOTE(review): unlike the error path above, no nil check on resp.Body; ClientHandler appears to always supply one — confirm
+ if err = resp.Body.Close(); err != nil {
+ return out, metadata, fmt.Errorf("close response body failed, %w", err)
+ }
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 000000000..a5668762b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,90 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialize request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID the identifier for the ComputeContentLength
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req) // NOTE(review): %T of req always prints *http.Request here; in.Request would report the actual offending type
+ }
+
+ // do nothing if request content-length was set to 0 or above.
+ if req.ContentLength >= 0 {
+ return next.HandleBuild(ctx, in)
+ }
+
+ // attempt to compute stream length
+ if n, ok, err := req.StreamLength(); err != nil {
+ return out, metadata, fmt.Errorf(
+ "failed getting length of request stream, %w", err)
+ } else if ok {
+ req.ContentLength = n
+ if n == 0 {
+ // If the content length could be determined, and the body is empty
+ // the stream must be cleared to prevent unexpected chunk encoding.
+ req, _ = req.SetStream(nil)
+ in.Request = req
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate the content-length
+// is valid (greater than zero), for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates request content-length
+// is set to value greater than zero.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+ return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID the identifier for the validateContentLength middleware
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the serialized request's content-length is set
+// and is not negative (unknown), returning an error otherwise.
+func (m *validateContentLength) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ // if request content-length was set to less than 0, return an error
+ if req.ContentLength < 0 {
+ return out, metadata, fmt.Errorf(
+ "content length for payload is required and must be at least 0")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 000000000..49884e6af
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,88 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type headerValue struct {
+ header string
+ value string
+ append bool
+}
+
+type headerValueHelper struct {
+ headerValues []headerValue
+}
+
+func (h *headerValueHelper) addHeaderValue(value headerValue) {
+ h.headerValues = append(h.headerValues, value)
+}
+
+func (h *headerValueHelper) ID() string {
+ return "HTTPHeaderHelper"
+}
+
+func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ for _, value := range h.headerValues {
+ if value.append {
+ req.Header.Add(value.header, value.value)
+ } else {
+ req.Header.Set(value.header, value.value)
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) {
+ id := (*headerValueHelper)(nil).ID()
+ m, ok := stack.Build.Get(id)
+ if !ok {
+ m = &headerValueHelper{}
+ err := stack.Build.Add(m, middleware.After)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ requestUserAgent, ok := m.(*headerValueHelper) // NOTE(review): name looks copy-pasted from a user-agent middleware; this is the generic header helper
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id)
+ }
+
+ return requestUserAgent, nil
+}
+
+// AddHeaderValue returns a stack mutator that adds the header value pair to header.
+// Appends to any existing values if present.
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ helper, err := getOrAddHeaderValueHelper(stack)
+ if err != nil {
+ return err
+ }
+ helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+ return nil
+ }
+}
+
+// SetHeaderValue returns a stack mutator that adds the header value pair to header.
+// Replaces any existing values if present.
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ helper, err := getOrAddHeaderValueHelper(stack)
+ if err != nil {
+ return err
+ }
+ helper.addHeaderValue(headerValue{header: header, value: value, append: false})
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
new file mode 100644
index 000000000..71f0dccec
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
@@ -0,0 +1,73 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http/httputil"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally
+// their respective bodies. Will not perform any logging if none of the options are set.
+type RequestResponseLogger struct {
+ LogRequest bool
+ LogRequestWithBody bool
+
+ LogResponse bool
+ LogResponseWithBody bool
+}
+
+// ID is the middleware identifier.
+func (r *RequestResponseLogger) ID() string {
+ return "RequestResponseLogger"
+}
+
+// HandleDeserialize will log the request and response HTTP messages if configured accordingly.
+func (r *RequestResponseLogger) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ logger := middleware.GetLogger(ctx)
+
+ if r.LogRequest || r.LogRequestWithBody {
+ smithyRequest, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in) // NOTE(review): %T of in prints the DeserializeInput wrapper; in.Request would be more informative
+ }
+
+ rc := smithyRequest.Build(ctx)
+ reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ logger.Logf(logging.Debug, "Request\n%v", string(reqBytes))
+
+ smithyRequest, err = smithyRequest.SetStream(rc.Body) // re-seat the stream; the dump may have consumed the built request body
+ if err != nil {
+ return out, metadata, err
+ }
+ in.Request = smithyRequest
+ }
+
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ if (err == nil) && (r.LogResponse || r.LogResponseWithBody) {
+ smithyResponse, ok := out.RawResponse.(*Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+ }
+
+ respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to dump response %w", err)
+ }
+
+ logger.Logf(logging.Debug, "Response\n%v", string(respBytes))
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go
new file mode 100644
index 000000000..443ececb2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go
@@ -0,0 +1,51 @@
+package http
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type (
+ hostnameImmutableKey struct{}
+ hostPrefixDisableKey struct{}
+)
+
+// GetHostnameImmutable retrieves if the endpoint hostname should be considered
+// immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+ v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+ return v
+}
+
+// SetHostnameImmutable sets or modifies if the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves if the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+ v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+ return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint
+// host prefixing should be disabled. If value is set to true, endpoint host
+// prefixing will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
new file mode 100644
index 000000000..caa73bec8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -0,0 +1,148 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
+type Request struct {
+ *http.Request
+ stream io.Reader
+ isStreamSeekable bool
+ streamStartPos int64
+}
+
+// NewStackRequest returns an initialized request ready to populated with the
+// HTTP request details. Returns empty interface so the function can be used as
+// a parameter to the Smithy middleware Stack constructor.
+func NewStackRequest() interface{} {
+ return &Request{
+ Request: &http.Request{
+ URL: &url.URL{},
+ Header: http.Header{},
+ ContentLength: -1, // default to unknown length
+ },
+ }
+}
+
+// Clone returns a deep copy of the Request for the new context. A reference to
+// the Stream is copied, but the underlying stream is not copied.
+func (r *Request) Clone() *Request {
+ rc := *r
+ rc.Request = rc.Request.Clone(context.TODO())
+ return &rc
+}
+
+// StreamLength returns the number of bytes of the serialized stream attached
+// to the request and ok set. If the length cannot be determined, an error will
+// be returned.
+func (r *Request) StreamLength() (size int64, ok bool, err error) {
+ if r.stream == nil {
+ return 0, true, nil
+ }
+
+ if l, ok := r.stream.(interface{ Len() int }); ok {
+ return int64(l.Len()), true, nil
+ }
+
+ if !r.isStreamSeekable {
+ return 0, false, nil
+ }
+
+ s := r.stream.(io.Seeker) // assertion is safe: isStreamSeekable is only set by SetStream when the reader implements io.Seeker
+ endOffset, err := s.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, false, err
+ }
+
+ // The reason to seek to streamStartPos instead of 0 is to ensure that the
+ // SDK only sends the stream from the starting position the user's
+ // application provided it to the SDK at. For example application opens a
+ // file, and wants to skip the first N bytes uploading the rest. The
+ // application would move the file's offset N bytes, then hand it off to
+ // the SDK to send the remaining. The SDK should respect that initial offset.
+ _, err = s.Seek(r.streamStartPos, io.SeekStart)
+ if err != nil {
+ return 0, false, err
+ }
+
+ return endOffset - r.streamStartPos, true, nil
+}
+
+// RewindStream will rewind the io.Reader to the relative start position if it
+// is an io.Seeker.
+func (r *Request) RewindStream() error {
+ // If there is no stream there is nothing to rewind.
+ if r.stream == nil {
+ return nil
+ }
+
+ if !r.isStreamSeekable {
+ return fmt.Errorf("request stream is not seekable")
+ }
+ _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart)
+ return err
+}
+
+// GetStream returns the request stream io.Reader if a stream is set. If no
+// stream is present nil will be returned.
+func (r *Request) GetStream() io.Reader {
+ return r.stream
+}
+
+// IsStreamSeekable returns if the stream is seekable.
+func (r *Request) IsStreamSeekable() bool {
+ return r.isStreamSeekable
+}
+
+// SetStream returns a clone of the request with the stream set to the provided reader.
+// May return an error if the provided reader is seekable but its Seek call fails.
+func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) {
+ rc = r.Clone()
+
+ switch v := reader.(type) {
+ case io.Seeker:
+ n, err := v.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return r, err
+ }
+ rc.isStreamSeekable = true
+ rc.streamStartPos = n
+ default:
+ rc.isStreamSeekable = false
+ }
+ rc.stream = reader
+
+ return rc, err
+}
+
+// Build returns a built standard HTTP request value from the Smithy request.
+// The request's stream is wrapped in a safe container that allows it to be
+// reused for subsequent attempts.
+func (r *Request) Build(ctx context.Context) *http.Request {
+ req := r.Request.Clone(ctx)
+
+ if r.stream != nil {
+ req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(r.stream))
+ } else {
+ // we update the content-length to 0,
+ // if request stream was not set.
+ req.ContentLength = 0
+ }
+
+ return req
+}
+
+// RequestCloner is a function that can take an input request type and clone the request
+// for use in a subsequent retry attempt
+func RequestCloner(v interface{}) interface{} {
+ return v.(*Request).Clone()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go
new file mode 100644
index 000000000..7c65fa31e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/response.go
@@ -0,0 +1,34 @@
+package http
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// Response provides the HTTP specific response structure for HTTP specific
+// middleware steps to use to deserialize the response from an operation call.
+type Response struct {
+ *http.Response
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying
+// error with the HTTP response value.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+// HTTPStatusCode returns the HTTP response status code received from the service
+func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode }
+
+// HTTPResponse returns the HTTP response received from the service.
+func (e *ResponseError) HTTPResponse() *Response { return e.Response }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *ResponseError) Unwrap() error { return e.Err }
+
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "http response error StatusCode: %d, %v",
+ e.Response.StatusCode, e.Err)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go
new file mode 100644
index 000000000..a6535c87f
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/time.go
@@ -0,0 +1,28 @@
+package http
+
+import (
+ "fmt"
+ "time"
+)
+
+// Time parsing function sourced from stdlib with an additional time format so
+// non-compliant timestamps are still parseable.
+// https://github.com/golang/go/blob/8869086d8f0a31033ccdc103106c768dc17216b1/src/net/http/header.go#L110-L127
+var timeFormats = []string{
+ "Mon, _2 Jan 2006 15:04:05 GMT", // Modifies http.TimeFormat with a leading underscore for day number (leading 0 optional).
+ "Mon, _2 Jan 06 15:04:05 GMT", // two digit year
+ time.RFC850,
+ time.ANSIC,
+}
+
+// ParseTime parses a time header like the HTTP Date header.
+// This uses a more relaxed rule set for date parsing compared to the standard library.
+func ParseTime(text string) (t time.Time, err error) {
+ for _, layout := range timeFormats {
+ t, err = time.Parse(layout, text)
+ if err == nil {
+ return t, nil
+ }
+ }
+ return time.Time{}, fmt.Errorf("unknown time format: %w", err)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
new file mode 100644
index 000000000..71a7e0d8a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
@@ -0,0 +1,37 @@
+package http
+
+import (
+ "strings"
+)
+
+// UserAgentBuilder is a builder for a HTTP User-Agent string.
+type UserAgentBuilder struct {
+ sb strings.Builder
+}
+
+// NewUserAgentBuilder returns a new UserAgentBuilder.
+func NewUserAgentBuilder() *UserAgentBuilder {
+ return &UserAgentBuilder{sb: strings.Builder{}}
+}
+
+// AddKey adds the named component/product to the agent string
+func (u *UserAgentBuilder) AddKey(key string) {
+ u.appendTo(key)
+}
+
+// AddKeyValue adds the named key to the agent string with the given value.
+func (u *UserAgentBuilder) AddKeyValue(key, value string) {
+ u.appendTo(key + "/" + value)
+}
+
+// Build returns the constructed User-Agent string. May be called multiple times.
+func (u *UserAgentBuilder) Build() string {
+ return u.sb.String()
+}
+
+func (u *UserAgentBuilder) appendTo(value string) {
+ if u.sb.Len() > 0 {
+ u.sb.WriteRune(' ')
+ }
+ u.sb.WriteString(value)
+}
diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go
new file mode 100644
index 000000000..b5eedc1f9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/validation.go
@@ -0,0 +1,140 @@
+package smithy
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// An InvalidParamsError provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type InvalidParamsError struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []InvalidParamError
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *InvalidParamsError) Add(err InvalidParamError) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another InvalidParamsError
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e *InvalidParamsError) Len() int {
+ return len(e.errs)
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e InvalidParamsError) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs))
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Error())
+ }
+
+ return w.String()
+}
+
+// Errs returns a slice of the invalid parameters
+func (e InvalidParamsError) Errs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An InvalidParamError represents an invalid parameter error type.
+type InvalidParamError interface {
+ error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type invalidParamError struct {
+ context string
+ nestedContext string
+ field string
+ reason string
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e invalidParamError) Error() string {
+ return fmt.Sprintf("%s, %s.", e.reason, e.Field())
+}
+
+// Field returns the field and the context in which the error occurred.
+func (e invalidParamError) Field() string {
+ sb := &strings.Builder{}
+ sb.WriteString(e.context)
+ if sb.Len() > 0 {
+ if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") {
+ sb.WriteRune('.')
+ }
+ }
+ if len(e.nestedContext) > 0 {
+ sb.WriteString(e.nestedContext)
+ sb.WriteRune('.')
+ }
+ sb.WriteString(e.field)
+ return sb.String()
+}
+
+// SetContext updates the base context of the error.
+func (e *invalidParamError) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ return
+ }
+ // Check if our nested context is an index into a slice or map
+ if e.nestedContext[:1] != "[" {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ return
+ }
+ e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+ invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+ return &ParamRequiredError{
+ invalidParamError{
+ field: field,
+ reason: fmt.Sprintf("missing required field"),
+ },
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go
new file mode 100644
index 000000000..064ecafab
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/logger.go
@@ -0,0 +1,36 @@
+package waiter
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// Logger is the Logger middleware used by the waiter to log an attempt
+type Logger struct {
+ // Attempt is the current attempt to be logged
+ Attempt int64
+}
+
+// ID representing the Logger middleware
+func (*Logger) ID() string {
+ return "WaiterLogger"
+}
+
+// HandleInitialize performs handling of request in initialize stack step
+func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ logger := middleware.GetLogger(ctx)
+
+ logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt))
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// AddLogger is a helper util to add the waiter logger after the `SetLogger` middleware in the stack.
+func (m Logger) AddLogger(stack *middleware.Stack) error {
+ return stack.Initialize.Insert(&m, "SetLogger", middleware.After)
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go
new file mode 100644
index 000000000..03e46e2ee
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go
@@ -0,0 +1,66 @@
+package waiter
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/aws/smithy-go/rand"
+)
+
+// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count,
+// minimum delay, maximum delay, and remaining wait time for waiter as input. The inputs minDelay and maxDelay
+// must always be greater than 0, along with minDelay lesser than or equal to maxDelay.
+//
+// Returns the computed delay and if next attempt count is possible within the given input time constraints.
+// Note that the zeroth attempt results in no delay.
+func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) {
+ // zeroth attempt, no delay
+ if attempt <= 0 {
+ return 0, nil
+ }
+
+ // remainingTime is zero or less, no delay
+ if remainingTime <= 0 {
+ return 0, nil
+ }
+
+ // validate min delay is greater than 0
+ if minDelay == 0 {
+ return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay")
+ }
+
+ // validate max delay is greater than 0
+ if maxDelay == 0 {
+ return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay")
+ }
+
+ // Get attempt ceiling to prevent integer overflow.
+ attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1
+
+ if attempt > int64(attemptCeiling) {
+ delay = maxDelay
+ } else {
+ // Compute exponential delay based on attempt.
+ ri := 1 << uint64(attempt-1)
+ // compute delay
+ delay = minDelay * time.Duration(ri)
+ }
+
+ if delay != minDelay {
+ // randomize to get jitter between min delay and delay value
+ d, err := rand.CryptoRandInt63n(int64(delay - minDelay))
+ if err != nil {
+ return 0, fmt.Errorf("error computing retry jitter, %w", err)
+ }
+
+ delay = time.Duration(d) + minDelay
+ }
+
+ // check if this is the last attempt possible and compute delay accordingly
+ if remainingTime-delay <= minDelay {
+ delay = remainingTime - minDelay
+ }
+
+ return delay, nil
+}
diff --git a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/post-processor.go b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/post-processor.go
index 5c3678559..b696a707b 100644
--- a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/post-processor.go
+++ b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/post-processor.go
@@ -5,6 +5,10 @@ import (
"errors"
"fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsconfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/exoscale/egoscale"
"github.com/hashicorp/packer-plugin-sdk/multistep"
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
@@ -27,6 +31,7 @@ type PostProcessor struct {
config *Config
runner multistep.Runner
exo *egoscale.Client
+ sos *s3.Client
}
func (p *PostProcessor) Configure(raws ...interface{}) error {
@@ -53,9 +58,35 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, a packer.
p.exo = egoscale.NewClient(p.config.APIEndpoint, p.config.APIKey, p.config.APISecret)
+ cfg, err := awsconfig.LoadDefaultConfig(
+ ctx,
+ awsconfig.WithRegion(p.config.TemplateZone),
+
+ awsconfig.WithEndpointResolver(aws.EndpointResolverFunc(
+ func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ URL: p.config.SOSEndpoint,
+ SigningRegion: p.config.TemplateZone,
+ }, nil
+ })),
+
+ awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
+ p.config.APIKey,
+ p.config.APISecret,
+ "")),
+ )
+ if err != nil {
+ return nil, false, false, fmt.Errorf("unable to initialize SOS client: %s", err)
+ }
+
+ p.sos = s3.NewFromConfig(cfg, func(o *s3.Options) {
+ o.UsePathStyle = true
+ })
+
state := new(multistep.BasicStateBag)
state.Put("config", p.config)
state.Put("exo", p.exo)
+ state.Put("sos", p.sos)
state.Put("ui", ui)
state.Put("artifact", a)
diff --git a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_delete_image.go b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_delete_image.go
index e75f6fad9..b499d96f5 100644
--- a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_delete_image.go
+++ b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_delete_image.go
@@ -5,10 +5,8 @@ import (
"fmt"
"path/filepath"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/hashicorp/packer-plugin-sdk/multistep"
"github.com/hashicorp/packer-plugin-sdk/packer"
)
@@ -19,6 +17,7 @@ func (s *stepDeleteImage) Run(ctx context.Context, state multistep.StateBag) mul
var (
ui = state.Get("ui").(packer.Ui)
config = state.Get("config").(*Config)
+ sos = state.Get("sos").(*s3.Client)
artifact = state.Get("artifact").(packer.Artifact)
imageFile = artifact.Files()[0]
@@ -31,20 +30,11 @@ func (s *stepDeleteImage) Run(ctx context.Context, state multistep.StateBag) mul
ui.Say("Deleting uploaded template image")
- sess, err := session.NewSessionWithOptions(session.Options{Config: aws.Config{
- Region: aws.String(config.TemplateZone),
- Endpoint: aws.String(config.SOSEndpoint),
- Credentials: credentials.NewStaticCredentials(config.APIKey, config.APISecret, "")}})
- if err != nil {
- ui.Error(fmt.Sprintf("unable to initialize session: %v", err))
- return multistep.ActionHalt
- }
-
- svc := s3.New(sess)
- if _, err := svc.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(config.ImageBucket),
- Key: aws.String(bucketFile),
- }); err != nil {
+ if _, err := sos.DeleteObject(ctx,
+ &s3.DeleteObjectInput{
+ Bucket: aws.String(config.ImageBucket),
+ Key: aws.String(bucketFile),
+ }); err != nil {
ui.Error(fmt.Sprintf("unable to delete template image: %v", err))
return multistep.ActionHalt
}
diff --git a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_upload_image.go b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_upload_image.go
index e88de4bf0..ea1e1efd3 100644
--- a/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_upload_image.go
+++ b/vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_upload_image.go
@@ -9,10 +9,10 @@ import (
"os"
"path/filepath"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/hashicorp/packer-plugin-sdk/multistep"
"github.com/hashicorp/packer-plugin-sdk/packer"
)
@@ -58,23 +58,16 @@ func (s *stepUploadImage) Run(ctx context.Context, state multistep.StateBag) mul
return multistep.ActionHalt
}
- sess, err := session.NewSessionWithOptions(session.Options{Config: aws.Config{
- Region: aws.String(config.TemplateZone),
- Endpoint: aws.String(config.SOSEndpoint),
- Credentials: credentials.NewStaticCredentials(config.APIKey, config.APISecret, "")}})
- if err != nil {
- ui.Error(fmt.Sprintf("unable to initialize session: %v", err))
- return multistep.ActionHalt
- }
-
- uploader := s3manager.NewUploader(sess)
- output, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
- Body: pf,
- Bucket: aws.String(config.ImageBucket),
- Key: aws.String(bucketFile),
- ContentMD5: aws.String(base64.StdEncoding.EncodeToString(hash.Sum(nil))),
- ACL: aws.String("public-read"),
- })
+ output, err := s3manager.
+ NewUploader(state.Get("sos").(*s3.Client)).
+ Upload(ctx,
+ &s3.PutObjectInput{
+ Bucket: aws.String(config.ImageBucket),
+ Key: aws.String(bucketFile),
+ Body: pf,
+ ContentMD5: aws.String(base64.StdEncoding.EncodeToString(hash.Sum(nil))),
+ ACL: s3types.ObjectCannedACLPublicRead,
+ })
if err != nil {
ui.Error(fmt.Sprintf("unable to upload template image: %v", err))
return multistep.ActionHalt
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go
index 5c8ee2b2d..a88582177 100644
--- a/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go
@@ -13,7 +13,7 @@ import (
var GitCommit string
// Package version helps plugin creators set and track the sdk version using
-var Version = "0.1.1"
+var Version = "0.1.2"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 218da3526..a8fb7056e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -178,6 +178,77 @@ github.com/aws/aws-sdk-go/service/sso
github.com/aws/aws-sdk-go/service/sso/ssoiface
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
+# github.com/aws/aws-sdk-go-v2 v1.2.1
+github.com/aws/aws-sdk-go-v2/aws
+github.com/aws/aws-sdk-go-v2/aws/arn
+github.com/aws/aws-sdk-go-v2/aws/middleware
+github.com/aws/aws-sdk-go-v2/aws/protocol/query
+github.com/aws/aws-sdk-go-v2/aws/protocol/restjson
+github.com/aws/aws-sdk-go-v2/aws/protocol/xml
+github.com/aws/aws-sdk-go-v2/aws/ratelimit
+github.com/aws/aws-sdk-go-v2/aws/retry
+github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4
+github.com/aws/aws-sdk-go-v2/aws/signer/v4
+github.com/aws/aws-sdk-go-v2/aws/transport/http
+github.com/aws/aws-sdk-go-v2/internal/awsutil
+github.com/aws/aws-sdk-go-v2/internal/endpoints
+github.com/aws/aws-sdk-go-v2/internal/ini
+github.com/aws/aws-sdk-go-v2/internal/rand
+github.com/aws/aws-sdk-go-v2/internal/sdk
+github.com/aws/aws-sdk-go-v2/internal/sdkio
+github.com/aws/aws-sdk-go-v2/internal/strings
+github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
+github.com/aws/aws-sdk-go-v2/internal/timeconv
+# github.com/aws/aws-sdk-go-v2/config v1.1.2
+github.com/aws/aws-sdk-go-v2/config
+# github.com/aws/aws-sdk-go-v2/credentials v1.1.2
+github.com/aws/aws-sdk-go-v2/credentials
+github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
+github.com/aws/aws-sdk-go-v2/credentials/endpointcreds
+github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
+github.com/aws/aws-sdk-go-v2/credentials/processcreds
+github.com/aws/aws-sdk-go-v2/credentials/ssocreds
+github.com/aws/aws-sdk-go-v2/credentials/stscreds
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.3
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds
+# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.0.3
+github.com/aws/aws-sdk-go-v2/feature/s3/manager
+# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.2
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
+# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.3
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
+# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.1
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
+# github.com/aws/aws-sdk-go-v2/service/s3 v1.2.1
+github.com/aws/aws-sdk-go-v2/service/s3
+github.com/aws/aws-sdk-go-v2/service/s3/internal/arn
+github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations
+github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/s3/types
+# github.com/aws/aws-sdk-go-v2/service/sso v1.1.2
+github.com/aws/aws-sdk-go-v2/service/sso
+github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/sso/types
+# github.com/aws/aws-sdk-go-v2/service/sts v1.1.2
+github.com/aws/aws-sdk-go-v2/service/sts
+github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/sts/types
+# github.com/aws/smithy-go v1.2.0
+github.com/aws/smithy-go
+github.com/aws/smithy-go/encoding
+github.com/aws/smithy-go/encoding/httpbinding
+github.com/aws/smithy-go/encoding/xml
+github.com/aws/smithy-go/io
+github.com/aws/smithy-go/logging
+github.com/aws/smithy-go/middleware
+github.com/aws/smithy-go/ptr
+github.com/aws/smithy-go/rand
+github.com/aws/smithy-go/time
+github.com/aws/smithy-go/transport/http
+github.com/aws/smithy-go/transport/http/internal/io
+github.com/aws/smithy-go/waiter
# github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
github.com/bgentry/go-netrc/netrc
# github.com/bgentry/speakeasy v0.1.0
@@ -224,7 +295,7 @@ github.com/exoscale/egoscale
github.com/exoscale/egoscale/v2
github.com/exoscale/egoscale/v2/api
github.com/exoscale/egoscale/v2/internal/public-api
-# github.com/exoscale/packer-plugin-exoscale v0.1.0
+# github.com/exoscale/packer-plugin-exoscale v0.1.1
## explicit
github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import
# github.com/fatih/camelcase v1.0.0
@@ -440,7 +511,7 @@ github.com/hashicorp/packer-plugin-docker/post-processor/docker-import
github.com/hashicorp/packer-plugin-docker/post-processor/docker-push
github.com/hashicorp/packer-plugin-docker/post-processor/docker-save
github.com/hashicorp/packer-plugin-docker/post-processor/docker-tag
-# github.com/hashicorp/packer-plugin-sdk v0.1.1
+# github.com/hashicorp/packer-plugin-sdk v0.1.2
## explicit
github.com/hashicorp/packer-plugin-sdk/acctest
github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc