author	Rutger Broekhoff	2023-12-29 21:31:53 +0100
committer	Rutger Broekhoff	2023-12-29 21:31:53 +0100
commit	404aeae4545d2426c089a5f8d5e82dae56f5212b (patch)
tree	2d84e00af272b39fc04f3795ae06bc48970e57b5 /vendor/github.com/minio
parent	209d8b0187ed025dec9ac149ebcced3462877bff (diff)
download	gitolfs3-404aeae4545d2426c089a5f8d5e82dae56f5212b.tar.gz
download	gitolfs3-404aeae4545d2426c089a5f8d5e82dae56f5212b.zip
Make Nix builds work
Diffstat (limited to 'vendor/github.com/minio')
-rw-r--r--  vendor/github.com/minio/md5-simd/LICENSE | 202
-rw-r--r--  vendor/github.com/minio/md5-simd/LICENSE.Golang | 27
-rw-r--r--  vendor/github.com/minio/md5-simd/README.md | 198
-rw-r--r--  vendor/github.com/minio/md5-simd/block16_amd64.s | 228
-rw-r--r--  vendor/github.com/minio/md5-simd/block8_amd64.s | 281
-rw-r--r--  vendor/github.com/minio/md5-simd/block_amd64.go | 210
-rw-r--r--  vendor/github.com/minio/md5-simd/md5-digest_amd64.go | 188
-rw-r--r--  vendor/github.com/minio/md5-simd/md5-server_amd64.go | 397
-rw-r--r--  vendor/github.com/minio/md5-simd/md5-server_fallback.go | 12
-rw-r--r--  vendor/github.com/minio/md5-simd/md5-util_amd64.go | 85
-rw-r--r--  vendor/github.com/minio/md5-simd/md5.go | 63
-rw-r--r--  vendor/github.com/minio/md5-simd/md5block_amd64.go | 11
-rw-r--r--  vendor/github.com/minio/md5-simd/md5block_amd64.s | 714
-rw-r--r--  vendor/github.com/minio/minio-go/v7/.gitignore | 6
-rw-r--r--  vendor/github.com/minio/minio-go/v7/.golangci.yml | 27
-rw-r--r--  vendor/github.com/minio/minio-go/v7/CNAME | 1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md | 22
-rw-r--r--  vendor/github.com/minio/minio-go/v7/LICENSE | 202
-rw-r--r--  vendor/github.com/minio/minio-go/v7/MAINTAINERS.md | 35
-rw-r--r--  vendor/github.com/minio/minio-go/v7/Makefile | 38
-rw-r--r--  vendor/github.com/minio/minio-go/v7/NOTICE | 9
-rw-r--r--  vendor/github.com/minio/minio-go/v7/README.md | 312
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go | 134
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go | 169
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-notification.go | 261
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-policy.go | 147
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-replication.go | 355
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go | 134
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go | 146
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-compose-object.go | 594
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-copy-object.go | 76
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-datatypes.go | 254
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-error-response.go | 284
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object-acl.go | 152
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object-file.go | 127
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object.go | 683
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-options.go | 203
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-list.go | 1057
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go | 176
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-object-lock.go | 241
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-object-retention.go | 165
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-object-tagging.go | 177
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-presigned.go | 228
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-bucket.go | 123
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-common.go | 149
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go | 164
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go | 64
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go | 465
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go | 809
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object.go | 473
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go | 246
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-remove.go | 548
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-restore.go | 182
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go | 390
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-select.go | 757
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-stat.go | 116
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api.go | 995
-rw-r--r--  vendor/github.com/minio/minio-go/v7/bucket-cache.go | 256
-rw-r--r--  vendor/github.com/minio/minio-go/v7/checksum.go | 210
-rw-r--r--  vendor/github.com/minio/minio-go/v7/code_of_conduct.md | 80
-rw-r--r--  vendor/github.com/minio/minio-go/v7/constants.go | 110
-rw-r--r--  vendor/github.com/minio/minio-go/v7/core.go | 150
-rw-r--r--  vendor/github.com/minio/minio-go/v7/functional_tests.go | 13004
-rw-r--r--  vendor/github.com/minio/minio-go/v7/hook-reader.go | 101
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go | 242
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go | 88
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample | 17
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go | 193
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json | 7
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample | 15
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go | 60
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go | 71
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go | 68
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go | 95
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go | 157
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go | 139
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go | 433
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go | 77
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go | 67
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go | 182
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go | 146
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go | 189
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go | 211
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go | 205
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go | 24
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go | 24
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go | 198
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go | 491
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/notification/info.go | 78
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go | 440
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go | 971
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go | 411
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go | 200
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go | 224
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go | 403
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go | 319
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go | 351
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go | 62
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go | 66
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go | 413
-rw-r--r--  vendor/github.com/minio/minio-go/v7/post-policy.go | 349
-rw-r--r--  vendor/github.com/minio/minio-go/v7/retry-continous.go | 69
-rw-r--r--  vendor/github.com/minio/minio-go/v7/retry.go | 148
-rw-r--r--  vendor/github.com/minio/minio-go/v7/s3-endpoints.go | 64
-rw-r--r--  vendor/github.com/minio/minio-go/v7/s3-error.go | 61
-rw-r--r--  vendor/github.com/minio/minio-go/v7/transport.go | 83
-rw-r--r--  vendor/github.com/minio/minio-go/v7/utils.go | 693
-rw-r--r--  vendor/github.com/minio/sha256-simd/.gitignore | 1
-rw-r--r--  vendor/github.com/minio/sha256-simd/LICENSE | 202
-rw-r--r--  vendor/github.com/minio/sha256-simd/README.md | 137
-rw-r--r--  vendor/github.com/minio/sha256-simd/cpuid_other.go | 50
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256.go | 468
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm | 686
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go | 501
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s | 267
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256block_amd64.go | 31
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256block_amd64.s | 266
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256block_arm64.go | 37
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256block_arm64.s | 192
-rw-r--r--  vendor/github.com/minio/sha256-simd/sha256block_other.go | 29
-rw-r--r--  vendor/github.com/minio/sha256-simd/test-architectures.sh | 15
121 files changed, 40109 insertions, 0 deletions
diff --git a/vendor/github.com/minio/md5-simd/LICENSE b/vendor/github.com/minio/md5-simd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md
new file mode 100644
index 0000000..fa6fce1
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/README.md
@@ -0,0 +1,198 @@

# md5-simd

This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core.

It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by, amongst other things, supporting different message sizes per lane and adding AVX512.

`md5-simd` integrates a mechanism similar to the one described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantage of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.

It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
thereby making more efficient usage of the computing resources.

## Usage

[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)


In order to use `md5-simd`, you must first create a `Server`, which can be
used to instantiate one or more objects for MD5 hashing.

These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
and as such the normal Write/Reset/Sum functionality works as expected.

As an example:
```
    // Create server
    server := md5simd.NewServer()
    defer server.Close()

    // Create hashing object (conforming to hash.Hash)
    md5Hash := server.NewHash()
    defer md5Hash.Close()

    // Write one (or more) blocks
    md5Hash.Write(block)

    // Return digest
    digest := md5Hash.Sum([]byte{})
```

To maintain performance, both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
and the individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) objects should
be closed using the `Close()` function when no longer needed.

A Hasher can efficiently be re-used by means of its [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) method.
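
For example, a single `Hasher` can process several messages in sequence without re-registering with the server (a minimal sketch; `data1` and `data2` are placeholder byte slices):
```
    md5Hash.Write(data1)
    first := md5Hash.Sum(nil)

    // Reset clears the hash state but keeps the Hasher usable.
    md5Hash.Reset()
    md5Hash.Write(data2)
    second := md5Hash.Sum(nil)
```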

In case your system does not support the required instructions, it will fall back to using `crypto/md5` for hashing.

## Limitations

As explained above, `md5-simd` does not speed up an individual MD5 hash sum computation,
unless some hierarchical tree construct is used, but this will result in different outcomes.
Running a single hash on a server results in approximately half the throughput.

Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
This can be beneficial in e.g. multi-threaded server applications where many goroutines
are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core, as sketched below.
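
A minimal sketch of that pattern, sharing one `Server` across goroutines (assumes `md5simd` and `sync` are imported, and `work` is a placeholder slice of messages):
```
    server := md5simd.NewServer()
    defer server.Close()

    var wg sync.WaitGroup
    for _, msg := range work {
        wg.Add(1)
        go func(msg []byte) {
            defer wg.Done()
            h := server.NewHash() // each goroutine gets its own Hasher
            defer h.Close()
            h.Write(msg)
            digest := h.Sum(nil)
            _ = digest // consume the digest
        }(msg)
    }
    wg.Wait()
```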

This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
functionality, where each MD5 hash computation will consume a single thread (core).

It is best to test and measure the overall CPU usage in a representative usage scenario in your application,
ideally under heavy CPU load, to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`.

Also note that `md5-simd` is best suited to hashing large objects,
so if your application only hashes small objects of a few kilobytes
you may be better off using `crypto/md5`.

## Performance

For the best performance, writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
To help with that, a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
writer can be inserted if you are unsure of the sizes of the writes.
Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
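
Putting that together (a minimal sketch; `r` stands in for any `io.Reader` supplying the data, and `bufio`/`io` are assumed to be imported):
```
    h := server.NewHash()
    defer h.Close()

    // Buffer writes so the hasher sees 32KB chunks.
    buffered := bufio.NewWriterSize(h, 32<<10)
    if _, err := io.Copy(buffered, r); err != nil {
        // handle the error
    }
    if err := buffered.Flush(); err != nil {
        // handle the error
    }
    digest := h.Sum(nil)
```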

A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
In situations where it is likely that more than 16 streams are fully loaded, it may be beneficial
to use multiple servers.

The following chart compares the multi-core performance of `crypto/md5` vs the AVX2 vs the AVX512 code:

![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)

Compared to `crypto/md5`, the AVX2 version is up to 4x faster:

```
$ benchcmp crypto-md5.txt avx2.txt
benchmark                     old MB/s     new MB/s     speedup
BenchmarkParallel/32KB-4      2229.22      7370.50      3.31x
BenchmarkParallel/64KB-4      2233.61      8248.46      3.69x
BenchmarkParallel/128KB-4     2235.43      8660.74      3.87x
BenchmarkParallel/256KB-4     2236.39      8863.87      3.96x
BenchmarkParallel/512KB-4     2238.05      8985.39      4.01x
BenchmarkParallel/1MB-4       2233.56      9042.62      4.05x
BenchmarkParallel/2MB-4       2224.11      9014.46      4.05x
BenchmarkParallel/4MB-4       2199.78      8993.61      4.09x
BenchmarkParallel/8MB-4       2182.48      8748.22      4.01x
```

Compared to `crypto/md5`, the AVX512 version is up to 8x faster (for larger block sizes):

```
$ benchcmp crypto-md5.txt avx512.txt
benchmark                     old MB/s     new MB/s     speedup
BenchmarkParallel/32KB-4      2229.22      11605.78     5.21x
BenchmarkParallel/64KB-4      2233.61      14329.65     6.42x
BenchmarkParallel/128KB-4     2235.43      16166.39     7.23x
BenchmarkParallel/256KB-4     2236.39      15570.09     6.96x
BenchmarkParallel/512KB-4     2238.05      16705.83     7.46x
BenchmarkParallel/1MB-4       2233.56      16941.95     7.59x
BenchmarkParallel/2MB-4       2224.11      17136.01     7.70x
BenchmarkParallel/4MB-4       2199.78      17218.61     7.83x
BenchmarkParallel/8MB-4       2182.48      17252.88     7.91x
```

These measurements were performed on an AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.

If only one or two inputs are available, the scalar calculation method will be used for
optimal speed in these cases.

## Operation

To make operation as easy as possible, there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:

![server-architecture](chart/server-architecture.png)

The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing, we found this to be the block size that yielded the best results.

Whenever there is data available, the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available, all the lanes will be filled. However, since that may not be the case, the server will fill fewer lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.

![server-lanes-example](chart/server-lanes-example.png)

In this example, 4 lanes are fully filled and 2 lanes are partially filled. In this case, the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.

For AVX512, all 16 calculations will be done on a single core; for AVX2, 2 cores are used if there is data for more than 8 lanes.
So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.


## Design & Tech

md5-simd has both an AVX2 (8-lane parallel) and an AVX512 (16-lane parallel) algorithm to accelerate the computation with the following function definitions:
```
//go:noescape
func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)

//go:noescape
func block16(state *uint32, ptrs *int64, mask uint64, n int)
```

The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes.

The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.

### Caching in upper ZMM registers

The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1`, which are subsequently used during `ROUND2` through `ROUND4`.

Since AVX512 has double the number of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` to the AVX512 block function.

### Direct loading using 64-bit pointers

The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out in memory, it is not possible to derive a "base" address and corresponding offsets (all within 32 bits) for all 8 slices.

As such, the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer, along with (fixed) 32-bit offsets, into the assembly code.

For the AVX512 version this interim buffer is not needed, since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).

Note that two load (gather) instructions are needed because the AVX512 version processes 16 lanes in parallel, requiring 16 times 64 bits = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).

### Masking support

Because pointers are passed directly from the Golang slices, we need to protect against NULL pointers.
For this, a 16-bit mask is passed to the AVX512 assembly code, which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
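
Conceptually, each bit of that mask marks a lane that still holds valid data. A hypothetical helper showing how such a mask could be derived from the 16 input slices (the actual logic lives in `generateMaskAndRounds16`, which additionally splits the inputs into rounds):
```
    // laneMask sets bit i when lane i has input to process.
    func laneMask(inputs [16][]byte) (mask uint64) {
        for i, in := range inputs {
            if len(in) > 0 {
                mask |= 1 << i
            }
        }
        return mask
    }
```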

### Minor optimizations

The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
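
In scalar Go terms, the three-instruction `roll` sequence (shift left, shift right, or) is simply a 32-bit rotate, which `VPROLD` performs in one step per lane. An illustrative comparison (assumes `math/bits` is imported):
```
    // What the AVX2 `roll` macro computes, spelled out.
    func rollManual(x uint32, shift int) uint32 {
        return (x << shift) | (x >> (32 - shift))
    }

    // The single-operation equivalent that VPROLD provides.
    func rollSingle(x uint32, shift int) uint32 {
        return bits.RotateLeft32(x, shift)
    }
```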

Also, several logical operations from the various rounds of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.

## Low level block function performance

The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison.

```
BenchmarkCryptoMd5-4    687.66 MB/s    0 B/op    0 allocs/op
BenchmarkBlock8-4      4144.80 MB/s    0 B/op    0 allocs/op
BenchmarkBlock16-4     8228.88 MB/s    0 B/op    0 allocs/op
```

## License

`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE.

## Contributing

Contributions are welcome, please send PRs for any enhancements.
\ No newline at end of file
diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s
new file mode 100644
index 0000000..be0a43a
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block16_amd64.s
@@ -0,0 +1,228 @@
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.

//+build !noasm,!appengine,gc

// This is the AVX512 implementation of the MD5 block function (16-way parallel)

#define prep(index) \
    KMOVQ      kmask, ktmp                      \
    VPGATHERDD index*4(base)(ptrs*1), ktmp, mem

#define ROUND1(a, b, c, d, index, const, shift) \
    VPXORQ     c, tmp, tmp            \
    VPADDD     64*const(consts), a, a \
    VPADDD     mem, a, a              \
    VPTERNLOGD $0x6C, b, d, tmp       \
    prep(index)                       \
    VPADDD     tmp, a, a              \
    VPROLD     $shift, a, a           \
    VMOVAPD    c, tmp                 \
    VPADDD     b, a, a

#define ROUND1noload(a, b, c, d, const, shift) \
    VPXORQ     c, tmp, tmp            \
    VPADDD     64*const(consts), a, a \
    VPADDD     mem, a, a              \
    VPTERNLOGD $0x6C, b, d, tmp       \
    VPADDD     tmp, a, a              \
    VPROLD     $shift, a, a           \
    VMOVAPD    c, tmp                 \
    VPADDD     b, a, a

#define ROUND2(a, b, c, d, zreg, const, shift) \
    VPADDD     64*const(consts), a, a \
    VPADDD     zreg, a, a             \
    VANDNPD    c, tmp, tmp            \
    VPTERNLOGD $0xEC, b, tmp, tmp2    \
    VMOVAPD    c, tmp                 \
    VPADDD     tmp2, a, a             \
    VMOVAPD    c, tmp2                \
    VPROLD     $shift, a, a           \
    VPADDD     b, a, a

#define ROUND3(a, b, c, d, zreg, const, shift) \
    VPADDD     64*const(consts), a, a \
    VPADDD     zreg, a, a             \
    VPTERNLOGD $0x96, b, d, tmp       \
    VPADDD     tmp, a, a              \
    VPROLD     $shift, a, a           \
    VMOVAPD    b, tmp                 \
    VPADDD     b, a, a

#define ROUND4(a, b, c, d, zreg, const, shift) \
    VPADDD     64*const(consts), a, a \
    VPADDD     zreg, a, a             \
    VPTERNLOGD $0x36, b, c, tmp       \
    VPADDD     tmp, a, a              \
    VPROLD     $shift, a, a           \
    VPXORQ     c, ones, tmp           \
    VPADDD     b, a, a

TEXT ·block16(SB), 4, $0-40

    MOVQ  state+0(FP), BX
    MOVQ  base+8(FP), SI
    MOVQ  ptrs+16(FP), AX
    KMOVQ mask+24(FP), K1
    MOVQ  n+32(FP), DX
    MOVQ  ·avx512md5consts+0(SB), DI

#define a Z0
#define b Z1
#define c Z2
#define d Z3

#define sa Z4
#define sb Z5
#define sc Z6
#define sd Z7

#define tmp  Z8
#define tmp2 Z9
#define ptrs Z10
#define ones Z12
#define mem  Z15

#define kmask K1
#define ktmp  K3

// ----------------------------------------------------------
// Registers Z16 through to Z31 are used for caching purposes
// ----------------------------------------------------------

#define dig    BX
#define count  DX
#define base   SI
#define consts DI

    // load digest into state registers
    VMOVUPD (dig), a
    VMOVUPD 0x40(dig), b
    VMOVUPD 0x80(dig), c
    VMOVUPD 0xc0(dig), d

    // load source pointers
    VMOVUPD 0x00(AX), ptrs

    MOVQ         $-1, AX
    VPBROADCASTQ AX, ones

loop:
    VMOVAPD a, sa
    VMOVAPD b, sb
    VMOVAPD c, sc
    VMOVAPD d, sd

    prep(0)
    VMOVAPD d, tmp
    VMOVAPD mem, Z16

    ROUND1(a,b,c,d, 1,0x00, 7)
    VMOVAPD mem, Z17
    ROUND1(d,a,b,c, 2,0x01,12)
    VMOVAPD mem, Z18
    ROUND1(c,d,a,b, 3,0x02,17)
    VMOVAPD mem, Z19
    ROUND1(b,c,d,a, 4,0x03,22)
    VMOVAPD mem, Z20
    ROUND1(a,b,c,d, 5,0x04, 7)
    VMOVAPD mem, Z21
    ROUND1(d,a,b,c, 6,0x05,12)
    VMOVAPD mem, Z22
    ROUND1(c,d,a,b, 7,0x06,17)
    VMOVAPD mem, Z23
    ROUND1(b,c,d,a, 8,0x07,22)
    VMOVAPD mem, Z24
    ROUND1(a,b,c,d, 9,0x08, 7)
    VMOVAPD mem, Z25
    ROUND1(d,a,b,c,10,0x09,12)
    VMOVAPD mem, Z26
    ROUND1(c,d,a,b,11,0x0a,17)
    VMOVAPD mem, Z27
    ROUND1(b,c,d,a,12,0x0b,22)
    VMOVAPD mem, Z28
    ROUND1(a,b,c,d,13,0x0c, 7)
    VMOVAPD mem, Z29
    ROUND1(d,a,b,c,14,0x0d,12)
    VMOVAPD mem, Z30
    ROUND1(c,d,a,b,15,0x0e,17)
    VMOVAPD mem, Z31

    ROUND1noload(b,c,d,a, 0x0f,22)

    VMOVAPD d, tmp
    VMOVAPD d, tmp2

    ROUND2(a,b,c,d, Z17,0x10, 5)
    ROUND2(d,a,b,c, Z22,0x11, 9)
    ROUND2(c,d,a,b, Z27,0x12,14)
    ROUND2(b,c,d,a, Z16,0x13,20)
    ROUND2(a,b,c,d, Z21,0x14, 5)
    ROUND2(d,a,b,c, Z26,0x15, 9)
    ROUND2(c,d,a,b, Z31,0x16,14)
    ROUND2(b,c,d,a, Z20,0x17,20)
    ROUND2(a,b,c,d, Z25,0x18, 5)
    ROUND2(d,a,b,c, Z30,0x19, 9)
    ROUND2(c,d,a,b, Z19,0x1a,14)
    ROUND2(b,c,d,a, Z24,0x1b,20)
    ROUND2(a,b,c,d, Z29,0x1c, 5)
    ROUND2(d,a,b,c, Z18,0x1d, 9)
    ROUND2(c,d,a,b, Z23,0x1e,14)
    ROUND2(b,c,d,a, Z28,0x1f,20)

    VMOVAPD c, tmp

    ROUND3(a,b,c,d, Z21,0x20, 4)
    ROUND3(d,a,b,c, Z24,0x21,11)
    ROUND3(c,d,a,b, Z27,0x22,16)
    ROUND3(b,c,d,a, Z30,0x23,23)
    ROUND3(a,b,c,d, Z17,0x24, 4)
    ROUND3(d,a,b,c, Z20,0x25,11)
    ROUND3(c,d,a,b, Z23,0x26,16)
    ROUND3(b,c,d,a, Z26,0x27,23)
    ROUND3(a,b,c,d, Z29,0x28, 4)
    ROUND3(d,a,b,c, Z16,0x29,11)
    ROUND3(c,d,a,b, Z19,0x2a,16)
    ROUND3(b,c,d,a, Z22,0x2b,23)
    ROUND3(a,b,c,d, Z25,0x2c, 4)
    ROUND3(d,a,b,c, Z28,0x2d,11)
    ROUND3(c,d,a,b, Z31,0x2e,16)
    ROUND3(b,c,d,a, Z18,0x2f,23)

    VPXORQ d, ones, tmp

    ROUND4(a,b,c,d, Z16,0x30, 6)
    ROUND4(d,a,b,c, Z23,0x31,10)
    ROUND4(c,d,a,b, Z30,0x32,15)
    ROUND4(b,c,d,a, Z21,0x33,21)
    ROUND4(a,b,c,d, Z28,0x34, 6)
    ROUND4(d,a,b,c, Z19,0x35,10)
    ROUND4(c,d,a,b, Z26,0x36,15)
    ROUND4(b,c,d,a, Z17,0x37,21)
    ROUND4(a,b,c,d, Z24,0x38, 6)
    ROUND4(d,a,b,c, Z31,0x39,10)
    ROUND4(c,d,a,b, Z22,0x3a,15)
    ROUND4(b,c,d,a, Z29,0x3b,21)
    ROUND4(a,b,c,d, Z20,0x3c, 6)
    ROUND4(d,a,b,c, Z27,0x3d,10)
    ROUND4(c,d,a,b, Z18,0x3e,15)
    ROUND4(b,c,d,a, Z25,0x3f,21)

    VPADDD sa, a, a
    VPADDD sb, b, b
    VPADDD sc, c, c
    VPADDD sd, d, d

    LEAQ 64(base), base
    SUBQ $64, count
    JNE  loop

    VMOVUPD a, (dig)
    VMOVUPD b, 0x40(dig)
    VMOVUPD c, 0x80(dig)
    VMOVUPD d, 0xc0(dig)

    VZEROUPPER
    RET
diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s
new file mode 100644
index 0000000..f57db17
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block8_amd64.s
@@ -0,0 +1,281 @@
//+build !noasm,!appengine,gc

// Copyright (c) 2018 Igneous Systems
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.

// This is the AVX2 implementation of the MD5 block function (8-way parallel)

// block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
TEXT ·block8(SB), 4, $0-40
    MOVQ state+0(FP), BX
    MOVQ base+8(FP), SI
    MOVQ bufs+16(FP), AX
    MOVQ cache+24(FP), CX
    MOVQ n+32(FP), DX
    MOVQ ·avx256md5consts+0(SB), DI

    // Align cache (which is stack allocated by the compiler)
    // to a 256 bit boundary (ymm register alignment)
    // The cache8 type is deliberately oversized to permit this.
    ADDQ $31, CX
    ANDB $-32, CL

#define a Y0
#define b Y1
#define c Y2
#define d Y3

#define sa Y4
#define sb Y5
#define sc Y6
#define sd Y7

#define tmp  Y8
#define tmp2 Y9

#define mask Y10
#define off  Y11

#define ones Y12

#define rtmp1 Y13
#define rtmp2 Y14

#define mem Y15

#define dig    BX
#define cache  CX
#define count  DX
#define base   SI
#define consts DI

#define prepmask \
    VPXOR    mask, mask, mask \
    VPCMPGTD mask, off, mask

#define prep(index) \
    VMOVAPD    mask, rtmp2                      \
    VPGATHERDD rtmp2, index*4(base)(off*1), mem

#define load(index) \
    VMOVAPD index*32(cache), mem

#define store(index) \
    VMOVAPD mem, index*32(cache)

#define roll(shift, a) \
    VPSLLD $shift, a, rtmp1 \
    VPSRLD $32-shift, a, a  \
    VPOR   rtmp1, a, a

#define ROUND1(a, b, c, d, index, const, shift) \
    VPXOR   c, tmp, tmp            \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a              \
    VPAND   b, tmp, tmp            \
    VPXOR   d, tmp, tmp            \
    prep(index)                    \
    VPADDD  tmp, a, a              \
    roll(shift,a)                  \
    VMOVAPD c, tmp                 \
    VPADDD  b, a, a

#define ROUND1load(a, b, c, d, index, const, shift) \
    VXORPD  c, tmp, tmp            \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a              \
    VPAND   b, tmp, tmp            \
    VPXOR   d, tmp, tmp            \
    load(index)                    \
    VPADDD  tmp, a, a              \
    roll(shift,a)                  \
    VMOVAPD c, tmp                 \
    VPADDD  b, a, a

#define ROUND2(a, b, c, d, index, const, shift) \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a              \
    VPAND   b, tmp2, tmp2          \
    VANDNPD c, tmp, tmp            \
    load(index)                    \
    VPOR    tmp, tmp2, tmp2        \
    VMOVAPD c, tmp                 \
    VPADDD  tmp2, a, a             \
    VMOVAPD c, tmp2                \
    roll(shift,a)                  \
    VPADDD  b, a, a

#define ROUND3(a, b, c, d, index, const, shift) \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a              \
    load(index)                    \
    VPXOR   d, tmp, tmp            \
    VPXOR   b, tmp, tmp            \
    VPADDD  tmp, a, a              \
    roll(shift,a)                  \
    VMOVAPD b, tmp                 \
    VPADDD  b, a, a

#define ROUND4(a, b, c, d, index, const, shift) \
    VPADDD 32*const(consts), a, a \
    VPADDD mem, a, a              \
    VPOR   b, tmp, tmp            \
    VPXOR  c, tmp, tmp            \
    VPADDD tmp, a, a              \
    load(index)                   \
    roll(shift,a)                 \
    VPXOR  c, ones, tmp           \
    VPADDD b, a, a

    // load digest into state registers
    VMOVUPD (dig), a
    VMOVUPD 32(dig), b
    VMOVUPD 64(dig), c
    VMOVUPD 96(dig), d

    // load source buffer offsets
    VMOVUPD (AX), off

    prepmask
    VPCMPEQD ones, ones, ones

loop:
    VMOVAPD a, sa
    VMOVAPD b, sb
    VMOVAPD c, sc
    VMOVAPD d, sd

    prep(0)
    VMOVAPD d, tmp
    store(0)

    ROUND1(a,b,c,d, 1,0x00, 7)
    store(1)
    ROUND1(d,a,b,c, 2,0x01,12)
    store(2)
    ROUND1(c,d,a,b, 3,0x02,17)
    store(3)
    ROUND1(b,c,d,a, 4,0x03,22)
    store(4)
    ROUND1(a,b,c,d, 5,0x04, 7)
    store(5)
    ROUND1(d,a,b,c, 6,0x05,12)
    store(6)
    ROUND1(c,d,a,b, 7,0x06,17)
    store(7)
    ROUND1(b,c,d,a, 8,0x07,22)
    store(8)
    ROUND1(a,b,c,d, 9,0x08, 7)
    store(9)
    ROUND1(d,a,b,c,10,0x09,12)
    store(10)
    ROUND1(c,d,a,b,11,0x0a,17)
    store(11)
    ROUND1(b,c,d,a,12,0x0b,22)
    store(12)
    ROUND1(a,b,c,d,13,0x0c, 7)
    store(13)
    ROUND1(d,a,b,c,14,0x0d,12)
    store(14)
    ROUND1(c,d,a,b,15,0x0e,17)
    store(15)
    ROUND1load(b,c,d,a, 1,0x0f,22)

    VMOVAPD d, tmp
    VMOVAPD d, tmp2

    ROUND2(a,b,c,d, 6,0x10, 5)
    ROUND2(d,a,b,c,11,0x11, 9)
    ROUND2(c,d,a,b, 0,0x12,14)
    ROUND2(b,c,d,a, 5,0x13,20)
    ROUND2(a,b,c,d,10,0x14, 5)
    ROUND2(d,a,b,c,15,0x15, 9)
    ROUND2(c,d,a,b, 4,0x16,14)
    ROUND2(b,c,d,a, 9,0x17,20)
    ROUND2(a,b,c,d,14,0x18, 5)
    ROUND2(d,a,b,c, 3,0x19, 9)
    ROUND2(c,d,a,b, 8,0x1a,14)
    ROUND2(b,c,d,a,13,0x1b,20)
    ROUND2(a,b,c,d, 2,0x1c, 5)
    ROUND2(d,a,b,c, 7,0x1d, 9)
    ROUND2(c,d,a,b,12,0x1e,14)
    ROUND2(b,c,d,a, 0,0x1f,20)

    load(5)
    VMOVAPD c, tmp

    ROUND3(a,b,c,d, 8,0x20, 4)
    ROUND3(d,a,b,c,11,0x21,11)
    ROUND3(c,d,a,b,14,0x22,16)
    ROUND3(b,c,d,a, 1,0x23,23)
    ROUND3(a,b,c,d, 4,0x24, 4)
    ROUND3(d,a,b,c, 7,0x25,11)
    ROUND3(c,d,a,b,10,0x26,16)
    ROUND3(b,c,d,a,13,0x27,23)
    ROUND3(a,b,c,d, 0,0x28, 4)
    ROUND3(d,a,b,c, 3,0x29,11)
    ROUND3(c,d,a,b, 6,0x2a,16)
    ROUND3(b,c,d,a, 9,0x2b,23)
    ROUND3(a,b,c,d,12,0x2c, 4)
    ROUND3(d,a,b,c,15,0x2d,11)
    ROUND3(c,d,a,b, 2,0x2e,16)
    ROUND3(b,c,d,a, 0,0x2f,23)

    load(0)
    VPXOR d, ones, tmp

    ROUND4(a,b,c,d, 7,0x30, 6)
    ROUND4(d,a,b,c,14,0x31,10)
    ROUND4(c,d,a,b, 5,0x32,15)
    ROUND4(b,c,d,a,12,0x33,21)
    ROUND4(a,b,c,d, 3,0x34, 6)
    ROUND4(d,a,b,c,10,0x35,10)
    ROUND4(c,d,a,b, 1,0x36,15)
    ROUND4(b,c,d,a, 8,0x37,21)
    ROUND4(a,b,c,d,15,0x38, 6)
    ROUND4(d,a,b,c, 6,0x39,10)
    ROUND4(c,d,a,b,13,0x3a,15)
    ROUND4(b,c,d,a, 4,0x3b,21)
    ROUND4(a,b,c,d,11,0x3c, 6)
    ROUND4(d,a,b,c, 2,0x3d,10)
    ROUND4(c,d,a,b, 9,0x3e,15)
    ROUND4(b,c,d,a, 0,0x3f,21)

    VPADDD sa, a, a
    VPADDD sb, b, b
    VPADDD sc, c, c
    VPADDD sd, d, d

    LEAQ 64(base), base
    SUBQ $64, count
    JNE  loop

    VMOVUPD a, (dig)
    VMOVUPD b, 32(dig)
    VMOVUPD c, 64(dig)
    VMOVUPD d, 96(dig)

    VZEROUPPER
    RET
diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go
new file mode 100644
index 0000000..16edda2
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block_amd64.go
@@ -0,0 +1,210 @@
//+build !noasm,!appengine,gc

// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.

package md5simd

import (
    "fmt"
    "math"
    "unsafe"

    "github.com/klauspost/cpuid/v2"
)

var hasAVX512 bool

func init() {
    // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F.
    hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ)
}

//go:noescape
func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)

//go:noescape
func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)

// 8-way 4x uint32 digests in 4 ymm registers
// (ymm0, ymm1, ymm2, ymm3)
type digest8 struct {
    v0, v1, v2, v3 [8]uint32
}

// Stack cache for 8x64 byte md5.BlockSize bytes.
// Must be 32-byte aligned, so allocate 512+32 and
// align upwards at runtime.
type cache8 [512 + 32]byte

// MD5 magic numbers for one lane of hashing; inflated
// 8x below at init time.
var md5consts = [64]uint32{
    0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
    0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
    0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
    0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
    0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
    0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
    0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
    0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
    0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
    0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
    0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
    0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
    0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
    0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
    0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
    0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
}

// inflate the consts 8-way for 8x md5 (256 bit ymm registers)
var avx256md5consts = func(c []uint32) []uint32 {
    inf := make([]uint32, 8*len(c))
    for i := range c {
        for j := 0; j < 8; j++ {
            inf[(i*8)+j] = c[i]
        }
    }
    return inf
}(md5consts[:])

// 16-way 4x uint32 digests in 4 zmm registers
type digest16 struct {
    v0, v1, v2, v3 [16]uint32
}

// inflate the consts 16-way for 16x md5 (512 bit zmm registers)
var avx512md5consts = func(c []uint32) []uint32 {
    inf := make([]uint32, 16*len(c))
    for i := range c {
        for j := 0; j < 16; j++ {
            inf[(i*16)+j] = c[i]
        }
    }
    return inf
}(md5consts[:])

// Interface function to assembly code
func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) {
    if hasAVX512 {
        blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16)
        return
    }

    // Preparing data using copy is slower since copies aren't inlined.

    // Calculate on this goroutine
    if half {
        for i := range s.i8[0][:] {
            s.i8[0][i] = input[i]
        }
        for i := range s.d8a.v0[:] {
            s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
        }
        blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a)
        for i := range s.d8a.v0[:] {
            d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
        }
        return
    }

    for i := range s.i8[0][:] {
        s.i8[0][i], s.i8[1][i] = input[i], input[8+i]
    }

    for i := range s.d8a.v0[:] {
        j := (i + 8) & 15
        s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
        s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j]
    }

    // Benchmarks appear to be slightly faster when spinning up 2 goroutines instead
    // of using the current one for one of the blocks.
    s.wg.Add(2)
    go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }()
    go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }()
    s.wg.Wait()
    for i := range s.d8a.v0[:] {
        d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
    }
    for i := range s.d8b.v0[:] {
        j := (i + 8) & 15
        d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i]
    }
}

// Interface function to AVX512 assembly code
func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) {
    baseMin := uint64(uintptr(unsafe.Pointer(&(base[0]))))
    ptrs := [16]int32{}

    for i := range ptrs {
        if len(input[i]) > 0 {
            if len(input[i]) > internalBlockSize {
                panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
            }

            off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
            if off > math.MaxUint32 {
                panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
            }
            ptrs[i] = int32(off)
        }
    }

    sdup := *s // create copy of initial states to receive intermediate updates

    rounds := generateMaskAndRounds16(input, maskRounds)

    for r := 0; r < rounds; r++ {
        m := maskRounds[r]

        block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds))

        for j := 0; j < len(ptrs); j++ {
            ptrs[j] += int32(64 * m.rounds) // update pointers for next round
            if m.mask&(1<<j) != 0 {         // update digest if still masked as active
                (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
            }
        }
    }
}

// Interface function to AVX2 assembly code
func blockMd5_avx2(s *digest8, input [8][]byte, base []byte, maskRounds *[8]maskRounds) {
    baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) - 4
    ptrs := [8]int32{}

    for i := range ptrs {
        if len(input[i]) > 0 {
            if len(input[i]) > internalBlockSize {
                panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
            }

            off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
            if off > math.MaxUint32 {
                panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
            }
            ptrs[i] = int32(off)
        }
    }

    sdup := *s // create copy of initial states to receive intermediate updates

    rounds := generateMaskAndRounds8(input, maskRounds)

    for r := 0; r < rounds; r++ {
        m := maskRounds[r]
        var cache cache8 // stack storage for block8 tmp state
        block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds))

        for j := 0; j < len(ptrs); j++ {
            ptrs[j] += int32(64 * m.rounds) // update pointers for next round
            if m.mask&(1<<j) != 0 {         // update digest if still masked as active
                (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
            }
        }
    }
}
diff --git a/vendor/github.com/minio/md5-simd/md5-digest_amd64.go b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go
new file mode 100644
index 0000000..5ea23a4
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go
@@ -0,0 +1,188 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9import (
10 "encoding/binary"
11 "errors"
12 "fmt"
13 "sync"
14 "sync/atomic"
15)
16
17// md5Digest - Type for computing MD5 using either AVX2 or AVX512
18type md5Digest struct {
19 uid uint64
20 blocksCh chan blockInput
21 cycleServer chan uint64
22 x [BlockSize]byte
23 nx int
24 len uint64
25 buffers <-chan []byte
26}
27
28// NewHash - initialize instance for Md5 implementation.
29func (s *md5Server) NewHash() Hasher {
30 uid := atomic.AddUint64(&s.uidCounter, 1)
31 blockCh := make(chan blockInput, buffersPerLane)
32 s.newInput <- newClient{
33 uid: uid,
34 input: blockCh,
35 }
36 return &md5Digest{
37 uid: uid,
38 buffers: s.buffers,
39 blocksCh: blockCh,
40 cycleServer: s.cycle,
41 }
42}
43
44// Size - Return size of checksum
45func (d *md5Digest) Size() int { return Size }
46
47// BlockSize - Return blocksize of checksum
48func (d md5Digest) BlockSize() int { return BlockSize }
49
50func (d *md5Digest) Reset() {
51 if d.blocksCh == nil {
52 panic("reset after close")
53 }
54 d.nx = 0
55 d.len = 0
56 d.sendBlock(blockInput{uid: d.uid, reset: true}, false)
57}
58
59// write to digest
60func (d *md5Digest) Write(p []byte) (nn int, err error) {
61 if d.blocksCh == nil {
62 return 0, errors.New("md5Digest closed")
63 }
64
65 // break input into chunks of maximum internalBlockSize size
66 for {
67 l := len(p)
68 if l > internalBlockSize {
69 l = internalBlockSize
70 }
71 nnn, err := d.write(p[:l])
72 if err != nil {
73 return nn, err
74 }
75 nn += nnn
76 p = p[l:]
77
78 if len(p) == 0 {
79 break
80 }
81
82 }
83 return
84}
85
86func (d *md5Digest) write(p []byte) (nn int, err error) {
87
88 nn = len(p)
89 d.len += uint64(nn)
90 if d.nx > 0 {
91 n := copy(d.x[d.nx:], p)
92 d.nx += n
93 if d.nx == BlockSize {
94 // Create a copy of the overflow buffer in order to send it async over the channel
95 // (since we will modify the overflow buffer down below with any access beyond multiples of 64)
96 tmp := <-d.buffers
97 tmp = tmp[:BlockSize]
98 copy(tmp, d.x[:])
99 d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize)
100 d.nx = 0
101 }
102 p = p[n:]
103 }
104 if len(p) >= BlockSize {
105 n := len(p) &^ (BlockSize - 1)
106 buf := <-d.buffers
107 buf = buf[:n]
108 copy(buf, p)
109 d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize)
110 p = p[n:]
111 }
112 if len(p) > 0 {
113 d.nx = copy(d.x[:], p)
114 }
115 return
116}
117
118func (d *md5Digest) Close() {
119 if d.blocksCh != nil {
120 close(d.blocksCh)
121 d.blocksCh = nil
122 }
123}
124
125var sumChPool sync.Pool
126
127func init() {
128 sumChPool.New = func() interface{} {
129 return make(chan sumResult, 1)
130 }
131}
132
133// Sum - Return MD5 sum in bytes
134func (d *md5Digest) Sum(in []byte) (result []byte) {
135 if d.blocksCh == nil {
136 panic("sum after close")
137 }
138
139 trail := <-d.buffers
140 trail = append(trail[:0], d.x[:d.nx]...)
141
142 length := d.len
143 // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
144 var tmp [64]byte
145 tmp[0] = 0x80
146 if length%64 < 56 {
147 trail = append(trail, tmp[0:56-length%64]...)
148 } else {
149 trail = append(trail, tmp[0:64+56-length%64]...)
150 }
151
152 // Length in bits.
153 length <<= 3
154 binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits
155
156 trail = append(trail, tmp[0:8]...)
157 if len(trail)%BlockSize != 0 {
158 panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx))
159 }
160 sumCh := sumChPool.Get().(chan sumResult)
161 d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true)
162
163 sum := <-sumCh
164 sumChPool.Put(sumCh)
165
166 return append(in, sum.digest[:]...)
167}
168
169// sendBlock will send a block for processing.
170// If cycle is true, we will block on cycle; otherwise we will only block
171// if the block channel is full.
172func (d *md5Digest) sendBlock(bi blockInput, cycle bool) {
173 if cycle {
174 select {
175 case d.blocksCh <- bi:
176 d.cycleServer <- d.uid
177 }
178 return
179 }
180 // Only block on cycle if we filled the buffer
181 select {
182 case d.blocksCh <- bi:
183 return
184 default:
185 d.cycleServer <- d.uid
186 d.blocksCh <- bi
187 }
188}
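The finalization in Sum above follows standard MD5 padding: append 0x80, zero-fill until the length is 56 bytes mod 64, then append the 8-byte bit length. A minimal sketch of the trailer-length arithmetic (illustrative only, not part of the vendored file; assumes just BlockSize = 64):

```go
// paddedTrailerLen mirrors the padding computation in Sum: the bytes
// still buffered (d.nx == msgLen % 64), plus 0x80/zero padding up to
// 56 mod 64, plus the 8-byte encoded bit length.
func paddedTrailerLen(msgLen int) int {
	rem := msgLen % 64
	pad := 56 - rem
	if rem >= 56 {
		pad = 64 + 56 - rem
	}
	return rem + pad + 8 // 64 for msgLen=0, 128 for msgLen=56; always a multiple of 64
}
```

This is exactly the invariant the panic in Sum asserts: the trailer is always a whole number of 64-byte blocks.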
diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
new file mode 100644
index 0000000..94f741c
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
@@ -0,0 +1,397 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9import (
10 "encoding/binary"
11 "fmt"
12 "runtime"
13 "sync"
14
15 "github.com/klauspost/cpuid/v2"
16)
17
18// MD5 initialization constants
19const (
20 // Lanes is the number of concurrently calculated hashes.
21 Lanes = 16
22
23 init0 = 0x67452301
24 init1 = 0xefcdab89
25 init2 = 0x98badcfe
26 init3 = 0x10325476
27
28 // Use scalar routine when below this many lanes
29 useScalarBelow = 3
30)
31
32// md5ServerUID - Does not start at 0 but at the next multiple of 16 so as to
33// differentiate from the default initialisation value of 0
34const md5ServerUID = Lanes
35
36const buffersPerLane = 3
37
38// Message to send across input channel
39type blockInput struct {
40 uid uint64
41 msg []byte
42 sumCh chan sumResult
43 reset bool
44}
45
46type sumResult struct {
47 digest [Size]byte
48}
49
50type lanesInfo [Lanes]blockInput
51
52// md5Server - Type to implement parallel handling of MD5 invocations
53type md5Server struct {
54 uidCounter uint64
55 cycle chan uint64 // client with uid has update.
56 newInput chan newClient // Add new client.
57 digests map[uint64][Size]byte // Map of uids to (interim) digest results
58 maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds
59 maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core)
60 maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core)
61 allBufs []byte // Preallocated buffer.
62 buffers chan []byte // Preallocated buffers, sliced from allBufs.
63
64 i8 [2][8][]byte // avx2 temporary vars
65 d8a, d8b digest8
66 wg sync.WaitGroup
67}
68
69// NewServer - Create a new object that handles parallel MD5 processing
70func NewServer() Server {
71 if !cpuid.CPU.Supports(cpuid.AVX2) {
72 return &fallbackServer{}
73 }
74 md5srv := &md5Server{}
75 md5srv.digests = make(map[uint64][Size]byte)
76 md5srv.newInput = make(chan newClient, Lanes)
77 md5srv.cycle = make(chan uint64, Lanes*10)
78 md5srv.uidCounter = md5ServerUID - 1
79 md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize)
80 md5srv.buffers = make(chan []byte, buffersPerLane*Lanes)
81 // Fill buffers.
82 for i := 0; i < buffersPerLane*Lanes; i++ {
83 s := 32 + i*internalBlockSize
84 md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize]
85 }
86
87 // Start a single thread for reading from the input channel
88 go md5srv.process(md5srv.newInput)
89 return md5srv
90}
91
92type newClient struct {
93 uid uint64
94 input chan blockInput
95}
96
97// process - Sole handler for reading from the input channel.
98func (s *md5Server) process(newClients chan newClient) {
99 // To fill up as many lanes as possible:
100 //
101 // 1. Wait for a cycle id.
102 // 2. If not already in a lane, add, otherwise leave on channel
103 // 3. Start timer
104 // 4. Check if lanes is full, if so, goto 10 (process).
105 // 5. If timeout, goto 10.
106 // 6. Wait for new id (goto 2) or timeout (goto 10).
107 // 10. Process.
108 // 11. Check all input if there is already input, if so add to lanes.
109 // 12. Goto 1
110
111 // lanes contains the lanes.
112 var lanes lanesInfo
113 // lanesFilled contains the number of filled lanes for current cycle.
114 var lanesFilled int
115 // clients contains active clients
116 var clients = make(map[uint64]chan blockInput, Lanes)
117
118 addToLane := func(uid uint64) {
119 cl, ok := clients[uid]
120 if !ok {
121 // Unknown client. Maybe it was already removed.
122 return
123 }
124 // Check if we already have it.
125 for _, lane := range lanes[:lanesFilled] {
126 if lane.uid == uid {
127 return
128 }
129 }
130 // Continue until we get a block or there is nothing on channel
131 for {
132 select {
133 case block, ok := <-cl:
134 if !ok {
135 // Client disconnected
136 delete(clients, block.uid)
137 return
138 }
139 if block.uid != uid {
140 panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid))
141 }
142 // If reset message, reset and we're done
143 if block.reset {
144 delete(s.digests, uid)
145 continue
146 }
147
148 // If requesting sum, we will need to maintain state.
149 if block.sumCh != nil {
150 var dig digest
151 d, ok := s.digests[uid]
152 if ok {
153 dig.s[0] = binary.LittleEndian.Uint32(d[0:4])
154 dig.s[1] = binary.LittleEndian.Uint32(d[4:8])
155 dig.s[2] = binary.LittleEndian.Uint32(d[8:12])
156 dig.s[3] = binary.LittleEndian.Uint32(d[12:16])
157 } else {
158 dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3
159 }
160
161 sum := sumResult{}
162 // Add end block to current digest.
163 blockScalar(&dig.s, block.msg)
164
165 binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0])
166 binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1])
167 binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2])
168 binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3])
169 block.sumCh <- sum
170 if block.msg != nil {
171 s.buffers <- block.msg
172 }
173 continue
174 }
175 if len(block.msg) == 0 {
176 continue
177 }
178 lanes[lanesFilled] = block
179 lanesFilled++
180 return
181 default:
182 return
183 }
184 }
185 }
186 addNewClient := func(cl newClient) {
187 if _, ok := clients[cl.uid]; ok {
188 panic("internal error: duplicate client registration")
189 }
190 clients[cl.uid] = cl.input
191 }
192
193 allLanesFilled := func() bool {
194 return lanesFilled == Lanes || lanesFilled >= len(clients)
195 }
196
197 for {
198 // Step 1.
199 for lanesFilled == 0 {
200 select {
201 case cl, ok := <-newClients:
202 if !ok {
203 return
204 }
205 addNewClient(cl)
206 // Check if it already sent a payload.
207 addToLane(cl.uid)
208 continue
209 case uid := <-s.cycle:
210 addToLane(uid)
211 }
212 }
213
214 fillLanes:
215 for !allLanesFilled() {
216 select {
217 case cl, ok := <-newClients:
218 if !ok {
219 return
220 }
221 addNewClient(cl)
222
223 case uid := <-s.cycle:
224 addToLane(uid)
225 default:
226 // Nothing more queued...
227 break fillLanes
228 }
229 }
230
231 // If we did not fill all lanes, check if there is more waiting
232 if !allLanesFilled() {
233 runtime.Gosched()
234 for uid := range clients {
235 addToLane(uid)
236 if allLanesFilled() {
237 break
238 }
239 }
240 }
241 if false {
242 if !allLanesFilled() {
243 fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients))
244 //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
245 } else if true {
246 fmt.Println("all lanes filled")
247 }
248 }
249 // Process the lanes we could collect
250 s.blocks(lanes[:lanesFilled])
251
252 // Clear lanes...
253 lanesFilled = 0
254 // Add all current queued
255 for uid := range clients {
256 addToLane(uid)
257 if allLanesFilled() {
258 break
259 }
260 }
261 }
262}
263
264func (s *md5Server) Close() {
265 if s.newInput != nil {
266 close(s.newInput)
267 s.newInput = nil
268 }
269}
270
271// Invoke assembly and send results back
272func (s *md5Server) blocks(lanes []blockInput) {
273 if len(lanes) < useScalarBelow {
274 // Use scalar routine when below this many lanes
275 switch len(lanes) {
276 case 0:
277 case 1:
278 lane := lanes[0]
279 var d digest
280 a, ok := s.digests[lane.uid]
281 if ok {
282 d.s[0] = binary.LittleEndian.Uint32(a[0:4])
283 d.s[1] = binary.LittleEndian.Uint32(a[4:8])
284 d.s[2] = binary.LittleEndian.Uint32(a[8:12])
285 d.s[3] = binary.LittleEndian.Uint32(a[12:16])
286 } else {
287 d.s[0] = init0
288 d.s[1] = init1
289 d.s[2] = init2
290 d.s[3] = init3
291 }
292 if len(lane.msg) > 0 {
293 // Update...
294 blockScalar(&d.s, lane.msg)
295 }
296 dig := [Size]byte{}
297 binary.LittleEndian.PutUint32(dig[0:], d.s[0])
298 binary.LittleEndian.PutUint32(dig[4:], d.s[1])
299 binary.LittleEndian.PutUint32(dig[8:], d.s[2])
300 binary.LittleEndian.PutUint32(dig[12:], d.s[3])
301 s.digests[lane.uid] = dig
302
303 if lane.msg != nil {
304 s.buffers <- lane.msg
305 }
306 lanes[0] = blockInput{}
307
308 default:
309 s.wg.Add(len(lanes))
310 var results [useScalarBelow]digest
311 for i := range lanes {
312 lane := lanes[i]
313 go func(i int) {
314 var d digest
315 defer s.wg.Done()
316 a, ok := s.digests[lane.uid]
317 if ok {
318 d.s[0] = binary.LittleEndian.Uint32(a[0:4])
319 d.s[1] = binary.LittleEndian.Uint32(a[4:8])
320 d.s[2] = binary.LittleEndian.Uint32(a[8:12])
321 d.s[3] = binary.LittleEndian.Uint32(a[12:16])
322 } else {
323 d.s[0] = init0
324 d.s[1] = init1
325 d.s[2] = init2
326 d.s[3] = init3
327 }
328 if len(lane.msg) == 0 {
329 results[i] = d
330 return
331 }
332 // Update...
333 blockScalar(&d.s, lane.msg)
334 results[i] = d
335 }(i)
336 }
337 s.wg.Wait()
338 for i, lane := range lanes {
339 dig := [Size]byte{}
340 binary.LittleEndian.PutUint32(dig[0:], results[i].s[0])
341 binary.LittleEndian.PutUint32(dig[4:], results[i].s[1])
342 binary.LittleEndian.PutUint32(dig[8:], results[i].s[2])
343 binary.LittleEndian.PutUint32(dig[12:], results[i].s[3])
344 s.digests[lane.uid] = dig
345
346 if lane.msg != nil {
347 s.buffers <- lane.msg
348 }
349 lanes[i] = blockInput{}
350 }
351 }
352 return
353 }
354
355 inputs := [16][]byte{}
356 for i := range lanes {
357 inputs[i] = lanes[i].msg
358 }
359
360 // Collect active digests...
361 state := s.getDigests(lanes)
362 // Process all lanes...
363 s.blockMd5_x16(&state, inputs, len(lanes) <= 8)
364
365 for i, lane := range lanes {
366 uid := lane.uid
367 dig := [Size]byte{}
368 binary.LittleEndian.PutUint32(dig[0:], state.v0[i])
369 binary.LittleEndian.PutUint32(dig[4:], state.v1[i])
370 binary.LittleEndian.PutUint32(dig[8:], state.v2[i])
371 binary.LittleEndian.PutUint32(dig[12:], state.v3[i])
372
373 s.digests[uid] = dig
374 if lane.msg != nil {
375 s.buffers <- lane.msg
376 }
377 lanes[i] = blockInput{}
378 }
379}
380
381func (s *md5Server) getDigests(lanes []blockInput) (d digest16) {
382 for i, lane := range lanes {
383 a, ok := s.digests[lane.uid]
384 if ok {
385 d.v0[i] = binary.LittleEndian.Uint32(a[0:4])
386 d.v1[i] = binary.LittleEndian.Uint32(a[4:8])
387 d.v2[i] = binary.LittleEndian.Uint32(a[8:12])
388 d.v3[i] = binary.LittleEndian.Uint32(a[12:16])
389 } else {
390 d.v0[i] = init0
391 d.v1[i] = init1
392 d.v2[i] = init2
393 d.v3[i] = init3
394 }
395 }
396 return
397}
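The lane-filling loop above only pays off when several hashes are in flight at once. A hedged usage sketch (not part of the vendored code) that lets the server fill its 16 lanes, using only the exported API defined in md5.go below:

```go
package main

import (
	"bytes"
	"sync"

	md5simd "github.com/minio/md5-simd"
)

func main() {
	server := md5simd.NewServer() // one server, shared by all goroutines
	defer server.Close()

	var wg sync.WaitGroup
	for i := 0; i < md5simd.Lanes; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			h := server.NewHash() // one Hasher per goroutine
			defer h.Close()
			h.Write(bytes.Repeat([]byte{byte(i)}, 1<<20)) // 1 MiB per lane
			_ = h.Sum(nil)
		}(i)
	}
	wg.Wait()
}
```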
diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
new file mode 100644
index 0000000..7814dad
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
@@ -0,0 +1,12 @@
1//+build !amd64 appengine !gc noasm
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9// NewServer - Create a new object that handles parallel MD5 processing
10func NewServer() *fallbackServer {
11 return &fallbackServer{}
12}
diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
new file mode 100644
index 0000000..73981b0
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
@@ -0,0 +1,85 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9// Helper struct for sorting blocks based on length
10type lane struct {
11 len uint
12 pos uint
13}
14
15type digest struct {
16 s [4]uint32
17}
18
19// Helper struct pairing a number of rounds with the mask of lanes still active
20type maskRounds struct {
21 mask uint64
22 rounds uint64
23}
24
25func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) {
26 // Sort on blocks length small to large
27 var sorted [8]lane
28 for c, inpt := range input[:] {
29 sorted[c] = lane{uint(len(inpt)), uint(c)}
30 for i := c - 1; i >= 0; i-- {
31 // swap so largest is at the end...
32 if sorted[i].len > sorted[i+1].len {
33 sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
34 continue
35 }
36 break
37 }
38 }
39
40 // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
41 m, round := uint64(0xff), uint64(0)
42
43 for _, s := range sorted[:] {
44 if s.len > 0 {
45 if uint64(s.len)>>6 > round {
46 mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
47 rounds++
48 }
49 round = uint64(s.len) >> 6
50 }
51 m = m & ^(1 << uint(s.pos))
52 }
53 return
54}
55
56func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) {
57 // Sort on blocks length small to large
58 var sorted [16]lane
59 for c, inpt := range input[:] {
60 sorted[c] = lane{uint(len(inpt)), uint(c)}
61 for i := c - 1; i >= 0; i-- {
62 // swap so largest is at the end...
63 if sorted[i].len > sorted[i+1].len {
64 sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
65 continue
66 }
67 break
68 }
69 }
70
71 // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
72 m, round := uint64(0xffff), uint64(0)
73
74 for _, s := range sorted[:] {
75 if s.len > 0 {
76 if uint64(s.len)>>6 > round {
77 mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
78 rounds++
79 }
80 round = uint64(s.len) >> 6
81 }
82 m = m & ^(1 << uint(s.pos))
83 }
84 return
85}
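A worked example makes the mask/rounds encoding concrete. Since these helpers are unexported, the sketch below would only compile inside the md5simd package (e.g. in a test); it assumes two active lanes, one and three blocks long:

```go
var input [8][]byte
input[0] = make([]byte, 64)  // lane 0: 1 block of 64 bytes
input[1] = make([]byte, 192) // lane 1: 3 blocks

var mr [8]maskRounds
rounds := generateMaskAndRounds8(input, &mr)

// rounds == 2:
//   mr[0] == maskRounds{mask: 0b00000011, rounds: 1} // both lanes active for block 1
//   mr[1] == maskRounds{mask: 0b00000010, rounds: 2} // only lane 1 for blocks 2 and 3
```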
diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go
new file mode 100644
index 0000000..11b0cb9
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5.go
@@ -0,0 +1,63 @@
1package md5simd
2
3import (
4 "crypto/md5"
5 "hash"
6 "sync"
7)
8
9const (
10 // The blocksize of MD5 in bytes.
11 BlockSize = 64
12
13 // The size of an MD5 checksum in bytes.
14 Size = 16
15
16 // internalBlockSize is the internal block size.
17 internalBlockSize = 32 << 10
18)
19
20type Server interface {
21 NewHash() Hasher
22 Close()
23}
24
25type Hasher interface {
26 hash.Hash
27 Close()
28}
29
30// StdlibHasher returns a Hasher that uses the stdlib for hashing.
31// Used hashers are stored in a pool for fast reuse.
32func StdlibHasher() Hasher {
33 return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
34}
35
36// md5Wrapper is a wrapper around the builtin hasher.
37type md5Wrapper struct {
38 hash.Hash
39}
40
41var md5Pool = sync.Pool{New: func() interface{} {
42 return md5.New()
43}}
44
45// fallbackServer - Fallback when no assembly is available.
46type fallbackServer struct {
47}
48
49// NewHash - return regular Golang MD5 hashing from crypto/md5
50func (s *fallbackServer) NewHash() Hasher {
51 return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
52}
53
54func (s *fallbackServer) Close() {
55}
56
57func (m *md5Wrapper) Close() {
58 if m.Hash != nil {
59 m.Reset()
60 md5Pool.Put(m.Hash)
61 m.Hash = nil
62 }
63}
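Taken together with the server implementation earlier in this diff, the exported surface is small: NewServer, Server, Hasher and StdlibHasher. A minimal usage sketch (illustrative; whichever backend is selected, the digest must match crypto/md5):

```go
package main

import (
	"crypto/md5"
	"fmt"

	md5simd "github.com/minio/md5-simd"
)

func main() {
	server := md5simd.NewServer()
	defer server.Close()

	h := server.NewHash() // a hash.Hash with an extra Close method
	defer h.Close()

	data := []byte("The quick brown fox jumps over the lazy dog")
	h.Write(data)

	fmt.Printf("%x\n", h.Sum(nil))    // SIMD (or fallback) digest
	fmt.Printf("%x\n", md5.Sum(data)) // identical stdlib digest
}
```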
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go
new file mode 100644
index 0000000..4c27936
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go
@@ -0,0 +1,11 @@
1// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
2
3// +build !appengine
4// +build !noasm
5// +build gc
6
7package md5simd
8
9// Encode p to digest
10//go:noescape
11func blockScalar(dig *[4]uint32, p []byte)
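The server code earlier in this diff drives this stub one lane at a time; a hedged in-package sketch of the calling convention (blockScalar and the init* constants are unexported, so this only compiles inside md5simd):

```go
// dig carries the running MD5 state across calls; p must be a whole
// number of 64-byte blocks, exactly as the generated assembly expects.
dig := [4]uint32{init0, init1, init2, init3}
block := make([]byte, 64) // one (already padded) message block
blockScalar(&dig, block)
```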
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s
new file mode 100644
index 0000000..fbc4a21
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s
@@ -0,0 +1,714 @@
1// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
2
3// +build !appengine
4// +build !noasm
5// +build gc
6
7// func blockScalar(dig *[4]uint32, p []byte)
8TEXT ·blockScalar(SB), $0-32
9 MOVQ p_len+16(FP), AX
10 MOVQ dig+0(FP), CX
11 MOVQ p_base+8(FP), DX
12 SHRQ $0x06, AX
13 SHLQ $0x06, AX
14 LEAQ (DX)(AX*1), AX
15 CMPQ DX, AX
16 JEQ end
17 MOVL (CX), BX
18 MOVL 4(CX), BP
19 MOVL 8(CX), SI
20 MOVL 12(CX), CX
21 MOVL $0xffffffff, DI
22
23loop:
24 MOVL (DX), R8
25 MOVL CX, R9
26 MOVL BX, R10
27 MOVL BP, R11
28 MOVL SI, R12
29 MOVL CX, R13
30
31 // ROUND1
32 XORL SI, R9
33 ADDL $0xd76aa478, BX
34 ADDL R8, BX
35 ANDL BP, R9
36 XORL CX, R9
37 MOVL 4(DX), R8
38 ADDL R9, BX
39 ROLL $0x07, BX
40 MOVL SI, R9
41 ADDL BP, BX
42 XORL BP, R9
43 ADDL $0xe8c7b756, CX
44 ADDL R8, CX
45 ANDL BX, R9
46 XORL SI, R9
47 MOVL 8(DX), R8
48 ADDL R9, CX
49 ROLL $0x0c, CX
50 MOVL BP, R9
51 ADDL BX, CX
52 XORL BX, R9
53 ADDL $0x242070db, SI
54 ADDL R8, SI
55 ANDL CX, R9
56 XORL BP, R9
57 MOVL 12(DX), R8
58 ADDL R9, SI
59 ROLL $0x11, SI
60 MOVL BX, R9
61 ADDL CX, SI
62 XORL CX, R9
63 ADDL $0xc1bdceee, BP
64 ADDL R8, BP
65 ANDL SI, R9
66 XORL BX, R9
67 MOVL 16(DX), R8
68 ADDL R9, BP
69 ROLL $0x16, BP
70 MOVL CX, R9
71 ADDL SI, BP
72 XORL SI, R9
73 ADDL $0xf57c0faf, BX
74 ADDL R8, BX
75 ANDL BP, R9
76 XORL CX, R9
77 MOVL 20(DX), R8
78 ADDL R9, BX
79 ROLL $0x07, BX
80 MOVL SI, R9
81 ADDL BP, BX
82 XORL BP, R9
83 ADDL $0x4787c62a, CX
84 ADDL R8, CX
85 ANDL BX, R9
86 XORL SI, R9
87 MOVL 24(DX), R8
88 ADDL R9, CX
89 ROLL $0x0c, CX
90 MOVL BP, R9
91 ADDL BX, CX
92 XORL BX, R9
93 ADDL $0xa8304613, SI
94 ADDL R8, SI
95 ANDL CX, R9
96 XORL BP, R9
97 MOVL 28(DX), R8
98 ADDL R9, SI
99 ROLL $0x11, SI
100 MOVL BX, R9
101 ADDL CX, SI
102 XORL CX, R9
103 ADDL $0xfd469501, BP
104 ADDL R8, BP
105 ANDL SI, R9
106 XORL BX, R9
107 MOVL 32(DX), R8
108 ADDL R9, BP
109 ROLL $0x16, BP
110 MOVL CX, R9
111 ADDL SI, BP
112 XORL SI, R9
113 ADDL $0x698098d8, BX
114 ADDL R8, BX
115 ANDL BP, R9
116 XORL CX, R9
117 MOVL 36(DX), R8
118 ADDL R9, BX
119 ROLL $0x07, BX
120 MOVL SI, R9
121 ADDL BP, BX
122 XORL BP, R9
123 ADDL $0x8b44f7af, CX
124 ADDL R8, CX
125 ANDL BX, R9
126 XORL SI, R9
127 MOVL 40(DX), R8
128 ADDL R9, CX
129 ROLL $0x0c, CX
130 MOVL BP, R9
131 ADDL BX, CX
132 XORL BX, R9
133 ADDL $0xffff5bb1, SI
134 ADDL R8, SI
135 ANDL CX, R9
136 XORL BP, R9
137 MOVL 44(DX), R8
138 ADDL R9, SI
139 ROLL $0x11, SI
140 MOVL BX, R9
141 ADDL CX, SI
142 XORL CX, R9
143 ADDL $0x895cd7be, BP
144 ADDL R8, BP
145 ANDL SI, R9
146 XORL BX, R9
147 MOVL 48(DX), R8
148 ADDL R9, BP
149 ROLL $0x16, BP
150 MOVL CX, R9
151 ADDL SI, BP
152 XORL SI, R9
153 ADDL $0x6b901122, BX
154 ADDL R8, BX
155 ANDL BP, R9
156 XORL CX, R9
157 MOVL 52(DX), R8
158 ADDL R9, BX
159 ROLL $0x07, BX
160 MOVL SI, R9
161 ADDL BP, BX
162 XORL BP, R9
163 ADDL $0xfd987193, CX
164 ADDL R8, CX
165 ANDL BX, R9
166 XORL SI, R9
167 MOVL 56(DX), R8
168 ADDL R9, CX
169 ROLL $0x0c, CX
170 MOVL BP, R9
171 ADDL BX, CX
172 XORL BX, R9
173 ADDL $0xa679438e, SI
174 ADDL R8, SI
175 ANDL CX, R9
176 XORL BP, R9
177 MOVL 60(DX), R8
178 ADDL R9, SI
179 ROLL $0x11, SI
180 MOVL BX, R9
181 ADDL CX, SI
182 XORL CX, R9
183 ADDL $0x49b40821, BP
184 ADDL R8, BP
185 ANDL SI, R9
186 XORL BX, R9
187 MOVL 4(DX), R8
188 ADDL R9, BP
189 ROLL $0x16, BP
190 MOVL CX, R9
191 ADDL SI, BP
192
193 // ROUND2
194 MOVL CX, R9
195 MOVL CX, R14
196 XORL DI, R9
197 ADDL $0xf61e2562, BX
198 ADDL R8, BX
199 ANDL BP, R14
200 ANDL SI, R9
201 MOVL 24(DX), R8
202 ORL R9, R14
203 MOVL SI, R9
204 ADDL R14, BX
205 MOVL SI, R14
206 ROLL $0x05, BX
207 ADDL BP, BX
208 XORL DI, R9
209 ADDL $0xc040b340, CX
210 ADDL R8, CX
211 ANDL BX, R14
212 ANDL BP, R9
213 MOVL 44(DX), R8
214 ORL R9, R14
215 MOVL BP, R9
216 ADDL R14, CX
217 MOVL BP, R14
218 ROLL $0x09, CX
219 ADDL BX, CX
220 XORL DI, R9
221 ADDL $0x265e5a51, SI
222 ADDL R8, SI
223 ANDL CX, R14
224 ANDL BX, R9
225 MOVL (DX), R8
226 ORL R9, R14
227 MOVL BX, R9
228 ADDL R14, SI
229 MOVL BX, R14
230 ROLL $0x0e, SI
231 ADDL CX, SI
232 XORL DI, R9
233 ADDL $0xe9b6c7aa, BP
234 ADDL R8, BP
235 ANDL SI, R14
236 ANDL CX, R9
237 MOVL 20(DX), R8
238 ORL R9, R14
239 MOVL CX, R9
240 ADDL R14, BP
241 MOVL CX, R14
242 ROLL $0x14, BP
243 ADDL SI, BP
244 XORL DI, R9
245 ADDL $0xd62f105d, BX
246 ADDL R8, BX
247 ANDL BP, R14
248 ANDL SI, R9
249 MOVL 40(DX), R8
250 ORL R9, R14
251 MOVL SI, R9
252 ADDL R14, BX
253 MOVL SI, R14
254 ROLL $0x05, BX
255 ADDL BP, BX
256 XORL DI, R9
257 ADDL $0x02441453, CX
258 ADDL R8, CX
259 ANDL BX, R14
260 ANDL BP, R9
261 MOVL 60(DX), R8
262 ORL R9, R14
263 MOVL BP, R9
264 ADDL R14, CX
265 MOVL BP, R14
266 ROLL $0x09, CX
267 ADDL BX, CX
268 XORL DI, R9
269 ADDL $0xd8a1e681, SI
270 ADDL R8, SI
271 ANDL CX, R14
272 ANDL BX, R9
273 MOVL 16(DX), R8
274 ORL R9, R14
275 MOVL BX, R9
276 ADDL R14, SI
277 MOVL BX, R14
278 ROLL $0x0e, SI
279 ADDL CX, SI
280 XORL DI, R9
281 ADDL $0xe7d3fbc8, BP
282 ADDL R8, BP
283 ANDL SI, R14
284 ANDL CX, R9
285 MOVL 36(DX), R8
286 ORL R9, R14
287 MOVL CX, R9
288 ADDL R14, BP
289 MOVL CX, R14
290 ROLL $0x14, BP
291 ADDL SI, BP
292 XORL DI, R9
293 ADDL $0x21e1cde6, BX
294 ADDL R8, BX
295 ANDL BP, R14
296 ANDL SI, R9
297 MOVL 56(DX), R8
298 ORL R9, R14
299 MOVL SI, R9
300 ADDL R14, BX
301 MOVL SI, R14
302 ROLL $0x05, BX
303 ADDL BP, BX
304 XORL DI, R9
305 ADDL $0xc33707d6, CX
306 ADDL R8, CX
307 ANDL BX, R14
308 ANDL BP, R9
309 MOVL 12(DX), R8
310 ORL R9, R14
311 MOVL BP, R9
312 ADDL R14, CX
313 MOVL BP, R14
314 ROLL $0x09, CX
315 ADDL BX, CX
316 XORL DI, R9
317 ADDL $0xf4d50d87, SI
318 ADDL R8, SI
319 ANDL CX, R14
320 ANDL BX, R9
321 MOVL 32(DX), R8
322 ORL R9, R14
323 MOVL BX, R9
324 ADDL R14, SI
325 MOVL BX, R14
326 ROLL $0x0e, SI
327 ADDL CX, SI
328 XORL DI, R9
329 ADDL $0x455a14ed, BP
330 ADDL R8, BP
331 ANDL SI, R14
332 ANDL CX, R9
333 MOVL 52(DX), R8
334 ORL R9, R14
335 MOVL CX, R9
336 ADDL R14, BP
337 MOVL CX, R14
338 ROLL $0x14, BP
339 ADDL SI, BP
340 XORL DI, R9
341 ADDL $0xa9e3e905, BX
342 ADDL R8, BX
343 ANDL BP, R14
344 ANDL SI, R9
345 MOVL 8(DX), R8
346 ORL R9, R14
347 MOVL SI, R9
348 ADDL R14, BX
349 MOVL SI, R14
350 ROLL $0x05, BX
351 ADDL BP, BX
352 XORL DI, R9
353 ADDL $0xfcefa3f8, CX
354 ADDL R8, CX
355 ANDL BX, R14
356 ANDL BP, R9
357 MOVL 28(DX), R8
358 ORL R9, R14
359 MOVL BP, R9
360 ADDL R14, CX
361 MOVL BP, R14
362 ROLL $0x09, CX
363 ADDL BX, CX
364 XORL DI, R9
365 ADDL $0x676f02d9, SI
366 ADDL R8, SI
367 ANDL CX, R14
368 ANDL BX, R9
369 MOVL 48(DX), R8
370 ORL R9, R14
371 MOVL BX, R9
372 ADDL R14, SI
373 MOVL BX, R14
374 ROLL $0x0e, SI
375 ADDL CX, SI
376 XORL DI, R9
377 ADDL $0x8d2a4c8a, BP
378 ADDL R8, BP
379 ANDL SI, R14
380 ANDL CX, R9
381 MOVL 20(DX), R8
382 ORL R9, R14
383 MOVL CX, R9
384 ADDL R14, BP
385 MOVL CX, R14
386 ROLL $0x14, BP
387 ADDL SI, BP
388
389 // ROUND3
390 MOVL SI, R9
391 ADDL $0xfffa3942, BX
392 ADDL R8, BX
393 MOVL 32(DX), R8
394 XORL CX, R9
395 XORL BP, R9
396 ADDL R9, BX
397 ROLL $0x04, BX
398 MOVL BP, R9
399 ADDL BP, BX
400 ADDL $0x8771f681, CX
401 ADDL R8, CX
402 MOVL 44(DX), R8
403 XORL SI, R9
404 XORL BX, R9
405 ADDL R9, CX
406 ROLL $0x0b, CX
407 MOVL BX, R9
408 ADDL BX, CX
409 ADDL $0x6d9d6122, SI
410 ADDL R8, SI
411 MOVL 56(DX), R8
412 XORL BP, R9
413 XORL CX, R9
414 ADDL R9, SI
415 ROLL $0x10, SI
416 MOVL CX, R9
417 ADDL CX, SI
418 ADDL $0xfde5380c, BP
419 ADDL R8, BP
420 MOVL 4(DX), R8
421 XORL BX, R9
422 XORL SI, R9
423 ADDL R9, BP
424 ROLL $0x17, BP
425 MOVL SI, R9
426 ADDL SI, BP
427 ADDL $0xa4beea44, BX
428 ADDL R8, BX
429 MOVL 16(DX), R8
430 XORL CX, R9
431 XORL BP, R9
432 ADDL R9, BX
433 ROLL $0x04, BX
434 MOVL BP, R9
435 ADDL BP, BX
436 ADDL $0x4bdecfa9, CX
437 ADDL R8, CX
438 MOVL 28(DX), R8
439 XORL SI, R9
440 XORL BX, R9
441 ADDL R9, CX
442 ROLL $0x0b, CX
443 MOVL BX, R9
444 ADDL BX, CX
445 ADDL $0xf6bb4b60, SI
446 ADDL R8, SI
447 MOVL 40(DX), R8
448 XORL BP, R9
449 XORL CX, R9
450 ADDL R9, SI
451 ROLL $0x10, SI
452 MOVL CX, R9
453 ADDL CX, SI
454 ADDL $0xbebfbc70, BP
455 ADDL R8, BP
456 MOVL 52(DX), R8
457 XORL BX, R9
458 XORL SI, R9
459 ADDL R9, BP
460 ROLL $0x17, BP
461 MOVL SI, R9
462 ADDL SI, BP
463 ADDL $0x289b7ec6, BX
464 ADDL R8, BX
465 MOVL (DX), R8
466 XORL CX, R9
467 XORL BP, R9
468 ADDL R9, BX
469 ROLL $0x04, BX
470 MOVL BP, R9
471 ADDL BP, BX
472 ADDL $0xeaa127fa, CX
473 ADDL R8, CX
474 MOVL 12(DX), R8
475 XORL SI, R9
476 XORL BX, R9
477 ADDL R9, CX
478 ROLL $0x0b, CX
479 MOVL BX, R9
480 ADDL BX, CX
481 ADDL $0xd4ef3085, SI
482 ADDL R8, SI
483 MOVL 24(DX), R8
484 XORL BP, R9
485 XORL CX, R9
486 ADDL R9, SI
487 ROLL $0x10, SI
488 MOVL CX, R9
489 ADDL CX, SI
490 ADDL $0x04881d05, BP
491 ADDL R8, BP
492 MOVL 36(DX), R8
493 XORL BX, R9
494 XORL SI, R9
495 ADDL R9, BP
496 ROLL $0x17, BP
497 MOVL SI, R9
498 ADDL SI, BP
499 ADDL $0xd9d4d039, BX
500 ADDL R8, BX
501 MOVL 48(DX), R8
502 XORL CX, R9
503 XORL BP, R9
504 ADDL R9, BX
505 ROLL $0x04, BX
506 MOVL BP, R9
507 ADDL BP, BX
508 ADDL $0xe6db99e5, CX
509 ADDL R8, CX
510 MOVL 60(DX), R8
511 XORL SI, R9
512 XORL BX, R9
513 ADDL R9, CX
514 ROLL $0x0b, CX
515 MOVL BX, R9
516 ADDL BX, CX
517 ADDL $0x1fa27cf8, SI
518 ADDL R8, SI
519 MOVL 8(DX), R8
520 XORL BP, R9
521 XORL CX, R9
522 ADDL R9, SI
523 ROLL $0x10, SI
524 MOVL CX, R9
525 ADDL CX, SI
526 ADDL $0xc4ac5665, BP
527 ADDL R8, BP
528 MOVL (DX), R8
529 XORL BX, R9
530 XORL SI, R9
531 ADDL R9, BP
532 ROLL $0x17, BP
533 MOVL SI, R9
534 ADDL SI, BP
535
536 // ROUND4
537 MOVL DI, R9
538 XORL CX, R9
539 ADDL $0xf4292244, BX
540 ADDL R8, BX
541 ORL BP, R9
542 XORL SI, R9
543 ADDL R9, BX
544 MOVL 28(DX), R8
545 MOVL DI, R9
546 ROLL $0x06, BX
547 XORL SI, R9
548 ADDL BP, BX
549 ADDL $0x432aff97, CX
550 ADDL R8, CX
551 ORL BX, R9
552 XORL BP, R9
553 ADDL R9, CX
554 MOVL 56(DX), R8
555 MOVL DI, R9
556 ROLL $0x0a, CX
557 XORL BP, R9
558 ADDL BX, CX
559 ADDL $0xab9423a7, SI
560 ADDL R8, SI
561 ORL CX, R9
562 XORL BX, R9
563 ADDL R9, SI
564 MOVL 20(DX), R8
565 MOVL DI, R9
566 ROLL $0x0f, SI
567 XORL BX, R9
568 ADDL CX, SI
569 ADDL $0xfc93a039, BP
570 ADDL R8, BP
571 ORL SI, R9
572 XORL CX, R9
573 ADDL R9, BP
574 MOVL 48(DX), R8
575 MOVL DI, R9
576 ROLL $0x15, BP
577 XORL CX, R9
578 ADDL SI, BP
579 ADDL $0x655b59c3, BX
580 ADDL R8, BX
581 ORL BP, R9
582 XORL SI, R9
583 ADDL R9, BX
584 MOVL 12(DX), R8
585 MOVL DI, R9
586 ROLL $0x06, BX
587 XORL SI, R9
588 ADDL BP, BX
589 ADDL $0x8f0ccc92, CX
590 ADDL R8, CX
591 ORL BX, R9
592 XORL BP, R9
593 ADDL R9, CX
594 MOVL 40(DX), R8
595 MOVL DI, R9
596 ROLL $0x0a, CX
597 XORL BP, R9
598 ADDL BX, CX
599 ADDL $0xffeff47d, SI
600 ADDL R8, SI
601 ORL CX, R9
602 XORL BX, R9
603 ADDL R9, SI
604 MOVL 4(DX), R8
605 MOVL DI, R9
606 ROLL $0x0f, SI
607 XORL BX, R9
608 ADDL CX, SI
609 ADDL $0x85845dd1, BP
610 ADDL R8, BP
611 ORL SI, R9
612 XORL CX, R9
613 ADDL R9, BP
614 MOVL 32(DX), R8
615 MOVL DI, R9
616 ROLL $0x15, BP
617 XORL CX, R9
618 ADDL SI, BP
619 ADDL $0x6fa87e4f, BX
620 ADDL R8, BX
621 ORL BP, R9
622 XORL SI, R9
623 ADDL R9, BX
624 MOVL 60(DX), R8
625 MOVL DI, R9
626 ROLL $0x06, BX
627 XORL SI, R9
628 ADDL BP, BX
629 ADDL $0xfe2ce6e0, CX
630 ADDL R8, CX
631 ORL BX, R9
632 XORL BP, R9
633 ADDL R9, CX
634 MOVL 24(DX), R8
635 MOVL DI, R9
636 ROLL $0x0a, CX
637 XORL BP, R9
638 ADDL BX, CX
639 ADDL $0xa3014314, SI
640 ADDL R8, SI
641 ORL CX, R9
642 XORL BX, R9
643 ADDL R9, SI
644 MOVL 52(DX), R8
645 MOVL DI, R9
646 ROLL $0x0f, SI
647 XORL BX, R9
648 ADDL CX, SI
649 ADDL $0x4e0811a1, BP
650 ADDL R8, BP
651 ORL SI, R9
652 XORL CX, R9
653 ADDL R9, BP
654 MOVL 16(DX), R8
655 MOVL DI, R9
656 ROLL $0x15, BP
657 XORL CX, R9
658 ADDL SI, BP
659 ADDL $0xf7537e82, BX
660 ADDL R8, BX
661 ORL BP, R9
662 XORL SI, R9
663 ADDL R9, BX
664 MOVL 44(DX), R8
665 MOVL DI, R9
666 ROLL $0x06, BX
667 XORL SI, R9
668 ADDL BP, BX
669 ADDL $0xbd3af235, CX
670 ADDL R8, CX
671 ORL BX, R9
672 XORL BP, R9
673 ADDL R9, CX
674 MOVL 8(DX), R8
675 MOVL DI, R9
676 ROLL $0x0a, CX
677 XORL BP, R9
678 ADDL BX, CX
679 ADDL $0x2ad7d2bb, SI
680 ADDL R8, SI
681 ORL CX, R9
682 XORL BX, R9
683 ADDL R9, SI
684 MOVL 36(DX), R8
685 MOVL DI, R9
686 ROLL $0x0f, SI
687 XORL BX, R9
688 ADDL CX, SI
689 ADDL $0xeb86d391, BP
690 ADDL R8, BP
691 ORL SI, R9
692 XORL CX, R9
693 ADDL R9, BP
694 ROLL $0x15, BP
695 ADDL SI, BP
696 ADDL R10, BX
697 ADDL R11, BP
698 ADDL R12, SI
699 ADDL R13, CX
700
701 // Prepare next loop
702 ADDQ $0x40, DX
703 CMPQ DX, AX
704 JB loop
705
706 // Write output
707 MOVQ dig+0(FP), AX
708 MOVL BX, (AX)
709 MOVL BP, 4(AX)
710 MOVL SI, 8(AX)
711 MOVL CX, 12(AX)
712
713end:
714 RET
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
new file mode 100644
index 0000000..8ae0384
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -0,0 +1,6 @@
1*~
2*.test
3validator
4golangci-lint
5functional_tests
6.idea \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml
new file mode 100644
index 0000000..875b949
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml
@@ -0,0 +1,27 @@
1linters-settings:
2 misspell:
3 locale: US
4
5linters:
6 disable-all: true
7 enable:
8 - typecheck
9 - goimports
10 - misspell
11 - revive
12 - govet
13 - ineffassign
14 - gosimple
15 - unused
16 - gocritic
17
18issues:
19 exclude-use-default: false
20 exclude:
21 # todo fix these when we get enough time.
22 - "singleCaseSwitch: should rewrite switch statement to if statement"
23 - "unlambda: replace"
24 - "captLocal:"
25 - "ifElseChain:"
26 - "elseif:"
27 - "should have a package comment"
diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME
new file mode 100644
index 0000000..d365a7b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CNAME
@@ -0,0 +1 @@
minio-go.min.io \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
new file mode 100644
index 0000000..24522ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -0,0 +1,22 @@
1### Developer Guidelines
2
3``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
4
5* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
6 - Fork it
7 - Create your feature branch (git checkout -b my-new-feature)
8 - Commit your changes (git commit -am 'Add some feature')
9 - Push to the branch (git push origin my-new-feature)
10 - Create new Pull Request
11
12* When you're ready to create a pull request, be sure to:
13 - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
14 - Run `go fmt`
15 - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
16 - Make sure `go test -race ./...` and `go build` complete.
17 NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the credentials as the environment variables
18 ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``
19
20* Read the [Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) wiki page from the Golang project
21 - the `minio-go` project strictly conforms to Golang style
22 - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 0000000..f640dfb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
1# For maintainers only
2
3## Responsibilities
4
5Please go through the [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) guide
6
7### Making new releases
8Tag and sign your release commit. Note that this step requires access to MinIO's trusted private key.
9```sh
10$ export GNUPGHOME=/media/${USER}/minio/trusted
11$ git tag -s 4.0.0
12$ git push
13$ git push --tags
14```
15
16### Update version
17Once the release has been made, update the `libraryVersion` constant in `api.go` to the next version to be released.
18
19```sh
20$ grep libraryVersion api.go
21 libraryVersion = "4.0.1"
22```
23
24Commit your changes
25```
26$ git commit -a -m "Update version for next release" --author "MinIO Trusted <[email protected]>"
27```
28
29### Announce
30Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `[email protected]` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog is a list of all commits since the last release.
31
32To generate `changelog`
33```sh
34$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
35```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 0000000..68444aa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,38 @@
1GOPATH := $(shell go env GOPATH)
2TMPDIR := $(shell mktemp -d)
3
4all: checks
5
6.PHONY: examples docs
7
8checks: lint vet test examples functional-test
9
10lint:
11 @mkdir -p ${GOPATH}/bin
12 @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
13 @echo "Running $@ check"
14 @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
15 @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
16
17vet:
18 @GO111MODULE=on go vet ./...
19 @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
20 ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
21
22test:
23 @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
24
25examples:
26 @echo "Building s3 examples"
27 @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
28 @echo "Building minio examples"
29 @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
30
31functional-test:
32 @GO111MODULE=on go build -race functional_tests.go
33 @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
34
35clean:
36 @echo "Cleaning up all the generated files"
37 @find . -name '*.test' | xargs rm -fv
38 @find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 0000000..1e8fd3b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
1MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
2
3This product includes software developed at MinIO, Inc.
4(https://min.io/).
5
6The MinIO project contains unmodified/modified subcomponents too with
7separate copyright notices and license terms. Your use of the source
8code for these subcomponents is subject to the terms and conditions
9of Apache License Version 2.0
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 0000000..82f70a1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,312 @@
1# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
2
3The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.
4
5This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
6For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
7
8These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).
9
10## Download from Github
11
12From your project directory:
13
14```sh
15go get github.com/minio/minio-go/v7
16```
17
18## Initialize a MinIO Client Object
19
20The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage service:
21
22| Parameter | Description |
23| ----------------- | ---------------------------------------------------------- |
24| `endpoint` | URL to object storage service. |
25| `minio.Options` | All the options, such as credentials, custom transport, etc. |
26
27```go
28package main
29
30import (
31 "log"
32
33 "github.com/minio/minio-go/v7"
34 "github.com/minio/minio-go/v7/pkg/credentials"
35)
36
37func main() {
38 endpoint := "play.min.io"
39 accessKeyID := "Q3AM3UQ867SPQQA43P2F"
40 secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
41 useSSL := true
42
43 // Initialize minio client object.
44 minioClient, err := minio.New(endpoint, &minio.Options{
45 Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
46 Secure: useSSL,
47 })
48 if err != nil {
49 log.Fatalln(err)
50 }
51
52 log.Printf("%#v\n", minioClient) // minioClient is now set up
53}
54```
55
56## Example - File Uploader
57
58This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
59It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
60
61The `play` server runs the latest stable version of MinIO and may be used for testing and development.
62The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
63
64### FileUploader.go
65
66This example does the following:
67
68- Connects to the MinIO `play` server using the provided credentials.
69- Creates a bucket named `testbucket`.
70- Uploads a file named `testdata` from `/tmp`.
71- Verifies the file was created using `mc ls`.
72
73```go
74// FileUploader.go MinIO example
75package main
76
77import (
78 "context"
79 "log"
80
81 "github.com/minio/minio-go/v7"
82 "github.com/minio/minio-go/v7/pkg/credentials"
83)
84
85func main() {
86 ctx := context.Background()
87 endpoint := "play.min.io"
88 accessKeyID := "Q3AM3UQ867SPQQA43P2F"
89 secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
90 useSSL := true
91
92 // Initialize minio client object.
93 minioClient, err := minio.New(endpoint, &minio.Options{
94 Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
95 Secure: useSSL,
96 })
97 if err != nil {
98 log.Fatalln(err)
99 }
100
101 // Make a new bucket called testbucket.
102 bucketName := "testbucket"
103 location := "us-east-1"
104
105 err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
106 if err != nil {
107 // Check to see if we already own this bucket (which happens if you run this twice)
108 exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
109 if errBucketExists == nil && exists {
110 log.Printf("We already own %s\n", bucketName)
111 } else {
112 log.Fatalln(err)
113 }
114 } else {
115 log.Printf("Successfully created %s\n", bucketName)
116 }
117
118 // Upload the test file
119 // Change the value of filePath if the file is in another location
120 objectName := "testdata"
121 filePath := "/tmp/testdata"
122 contentType := "application/octet-stream"
123
124 // Upload the test file with FPutObject
125 info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
126 if err != nil {
127 log.Fatalln(err)
128 }
129
130 log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
131}
132```
133
134**1. Create a test file containing data:**
135
136You can do this with `dd` on Linux or macOS systems:
137
138```sh
139dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
140```
141
142or `fsutil` on Windows:
143
144```sh
145fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
146```
147
148**2. Run FileUploader with the following commands:**
149
150```sh
151go mod init example/FileUploader
152go get github.com/minio/minio-go/v7
153go get github.com/minio/minio-go/v7/pkg/credentials
154go run FileUploader.go
155```
156
157The output resembles the following:
158
159```sh
1602023/11/01 14:27:55 Successfully created testbucket
1612023/11/01 14:27:55 Successfully uploaded testdata of size 20480
162```
163
164**3. Verify the Uploaded File With `mc ls`:**
165
166```sh
167mc ls play/testbucket
168[2023-11-01 14:27:55 UTC] 20KiB STANDARD testdata
169```
170
171## API Reference
172
173The full API Reference is available here.
174
175* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
176
177### API Reference : Bucket Operations
178
179* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
180* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
181* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
182* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
183* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
184* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
185
186### API Reference : Bucket policy Operations
187
188* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
189* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
190
191### API Reference : Bucket notification Operations
192
193* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
194* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
195* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
196* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
197* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
198
199### API Reference : File Object Operations
200
201* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
202* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
203
204### API Reference : Object Operations
205
206* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
207* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
208* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
209* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
210* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
211* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
212* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
213* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
214* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
215
216### API Reference : Presigned Operations
217
218* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
219* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
220* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
221* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
222
223### API Reference : Client custom settings
224
225* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
226* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
227* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
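
A minimal sketch of the client settings; the application name and version are illustrative:

```go
package main

import (
	"os"

	"github.com/minio/minio-go/v7"
)

// enableTracing tags requests with an app name/version and dumps the
// HTTP exchange to stderr; TraceOff disables tracing again.
func enableTracing(client *minio.Client) {
	client.SetAppInfo("my-app", "1.0.0") // appended to the User-Agent header
	client.TraceOn(os.Stderr)            // log each request/response to stderr
	// ... perform operations ...
	client.TraceOff() // stop tracing when no longer needed
}
```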
228
229## Full Examples
230
231### Full Examples : Bucket Operations
232
233* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
234* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
235* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
236* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
237* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
238* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
239* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
240
241### Full Examples : Bucket policy Operations
242
243* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
244* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
245* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
246
247### Full Examples : Bucket lifecycle Operations
248
249* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
250* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
251
252### Full Examples : Bucket encryption Operations
253
254* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
255* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
256* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
257
258### Full Examples : Bucket replication Operations
259
260* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
261* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
262* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
263
264### Full Examples : Bucket notification Operations
265
266* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
267* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
268* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
269* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
270* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
271
272### Full Examples : File Object Operations
273
274* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
275* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
276
277### Full Examples : Object Operations
278
279* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
280* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
281* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
282* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
283* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
284* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
285* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
286
287### Full Examples : Encrypted Object Operations
288
289* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
290* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
291* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
292
293### Full Examples : Presigned Operations
294
295* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
296* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
297* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
298* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
299
300## Explore Further
301
302* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
303* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
304* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
305
306## Contribute
307
308[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
309
310## License
311
312This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 0000000..24f94e0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27 "github.com/minio/minio-go/v7/pkg/sse"
28)
29
30// SetBucketEncryption sets the default encryption configuration on an existing bucket.
31func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
32 // Input validation.
33 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
34 return err
35 }
36
37 if config == nil {
38 return errInvalidArgument("configuration cannot be empty")
39 }
40
41 buf, err := xml.Marshal(config)
42 if err != nil {
43 return err
44 }
45
46 // Get resources properly escaped and lined up before
47 // using them in http request.
48 urlValues := make(url.Values)
49 urlValues.Set("encryption", "")
50
51 // Content-length is mandatory to set a default encryption configuration
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: bytes.NewReader(buf),
56 contentLength: int64(len(buf)),
57 contentMD5Base64: sumMD5Base64(buf),
58 }
59
60 // Execute PUT to upload a new bucket default encryption configuration.
61 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
62 defer closeResponse(resp)
63 if err != nil {
64 return err
65 }
66 if resp.StatusCode != http.StatusOK {
67 return httpRespToErrorResponse(resp, bucketName, "")
68 }
69 return nil
70}
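
// Usage sketch (illustrative, not from the upstream docs; the bucket name is
// hypothetical and c is assumed to be an initialized *Client):
//
//	cfg := sse.NewConfigurationSSES3()
//	if err := c.SetBucketEncryption(context.Background(), "my-bucket", cfg); err != nil {
//		// handle error
//	}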
71
72// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
73func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
74 // Input validation.
75 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
76 return err
77 }
78
79 // Get resources properly escaped and lined up before
80 // using them in http request.
81 urlValues := make(url.Values)
82 urlValues.Set("encryption", "")
83
84 // DELETE default encryption configuration on a bucket.
85 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
86 bucketName: bucketName,
87 queryValues: urlValues,
88 contentSHA256Hex: emptySHA256Hex,
89 })
90 defer closeResponse(resp)
91 if err != nil {
92 return err
93 }
94 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
95 return httpRespToErrorResponse(resp, bucketName, "")
96 }
97 return nil
98}
99
100// GetBucketEncryption gets the default encryption configuration
101// on an existing bucket with a context to control cancellations and timeouts.
102func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
103 // Input validation.
104 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
105 return nil, err
106 }
107
108 // Get resources properly escaped and lined up before
109 // using them in http request.
110 urlValues := make(url.Values)
111 urlValues.Set("encryption", "")
112
113 // Execute GET on bucket to get the default encryption configuration.
114 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
115 bucketName: bucketName,
116 queryValues: urlValues,
117 })
118
119 defer closeResponse(resp)
120 if err != nil {
121 return nil, err
122 }
123
124 if resp.StatusCode != http.StatusOK {
125 return nil, httpRespToErrorResponse(resp, bucketName, "")
126 }
127
128 encryptionConfig := &sse.Configuration{}
129 if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
130 return nil, err
131 }
132
133 return encryptionConfig, nil
134}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
new file mode 100644
index 0000000..fec5cec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -0,0 +1,169 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "io"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/lifecycle"
30 "github.com/minio/minio-go/v7/pkg/s3utils"
31)
32
33// SetBucketLifecycle sets the lifecycle configuration on an existing bucket.
34func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
35 // Input validation.
36 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
37 return err
38 }
39
40 // If lifecycle is empty then delete it.
41 if config.Empty() {
42 return c.removeBucketLifecycle(ctx, bucketName)
43 }
44
45 buf, err := xml.Marshal(config)
46 if err != nil {
47 return err
48 }
49
50 // Save the updated lifecycle.
51 return c.putBucketLifecycle(ctx, bucketName, buf)
52}
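
// Usage sketch (illustrative, not from the upstream docs; the rule contents
// are hypothetical and c is assumed to be an initialized *Client):
//
//	cfg := lifecycle.NewConfiguration()
//	cfg.Rules = []lifecycle.Rule{{
//		ID:         "expire-logs",
//		Status:     "Enabled",
//		RuleFilter: lifecycle.Filter{Prefix: "logs/"},
//		Expiration: lifecycle.Expiration{Days: 365},
//	}}
//	if err := c.SetBucketLifecycle(context.Background(), "my-bucket", cfg); err != nil {
//		// handle error
//	}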
53
54// Saves a new bucket lifecycle.
55func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
56 // Get resources properly escaped and lined up before
57 // using them in http request.
58 urlValues := make(url.Values)
59 urlValues.Set("lifecycle", "")
60
61 // Content-length is mandatory for put lifecycle request
62 reqMetadata := requestMetadata{
63 bucketName: bucketName,
64 queryValues: urlValues,
65 contentBody: bytes.NewReader(buf),
66 contentLength: int64(len(buf)),
67 contentMD5Base64: sumMD5Base64(buf),
68 }
69
70 // Execute PUT to upload a new bucket lifecycle.
71 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
72 defer closeResponse(resp)
73 if err != nil {
74 return err
75 }
76 if resp != nil {
77 if resp.StatusCode != http.StatusOK {
78 return httpRespToErrorResponse(resp, bucketName, "")
79 }
80 }
81 return nil
82}
83
84// Remove lifecycle from a bucket.
85func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
86 // Get resources properly escaped and lined up before
87 // using them in http request.
88 urlValues := make(url.Values)
89 urlValues.Set("lifecycle", "")
90
91 // Execute DELETE on objectName.
92 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
93 bucketName: bucketName,
94 queryValues: urlValues,
95 contentSHA256Hex: emptySHA256Hex,
96 })
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 return nil
102}
103
104// GetBucketLifecycle fetches the bucket lifecycle configuration.
105func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
106 lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
107 return lc, err
108}
109
110// GetBucketLifecycleWithInfo fetches the bucket lifecycle configuration along with the time it was last updated.
111func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
112 // Input validation.
113 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
114 return nil, time.Time{}, err
115 }
116
117 bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
118 if err != nil {
119 return nil, time.Time{}, err
120 }
121
122 config := lifecycle.NewConfiguration()
123 if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
124 return nil, time.Time{}, err
125 }
126 return config, updatedAt, nil
127}
128
129// Request server for current bucket lifecycle.
130func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
131 // Get resources properly escaped and lined up before
132 // using them in http request.
133 urlValues := make(url.Values)
134 urlValues.Set("lifecycle", "")
135 urlValues.Set("withUpdatedAt", "true")
136
137 // Execute GET on bucket to get lifecycle.
138 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
139 bucketName: bucketName,
140 queryValues: urlValues,
141 })
142
143 defer closeResponse(resp)
144 if err != nil {
145 return nil, time.Time{}, err
146 }
147
148 if resp != nil {
149 if resp.StatusCode != http.StatusOK {
150 return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
151 }
152 }
153
154 lcBytes, err := io.ReadAll(resp.Body)
155 if err != nil {
156 return nil, time.Time{}, err
157 }
158
159 const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
160 var updatedAt time.Time
161 if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
162 updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
163 if err != nil {
164 return nil, time.Time{}, err
165 }
166 }
167
168 return lcBytes, updatedAt, nil
169}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
new file mode 100644
index 0000000..8de5c01
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -0,0 +1,261 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bufio"
22 "bytes"
23 "context"
24 "encoding/xml"
25 "net/http"
26 "net/url"
27 "time"
28
29 jsoniter "github.com/json-iterator/go"
30 "github.com/minio/minio-go/v7/pkg/notification"
31 "github.com/minio/minio-go/v7/pkg/s3utils"
32)
33
34// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
35func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
36 // Input validation.
37 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
38 return err
39 }
40
41 // Get resources properly escaped and lined up before
42 // using them in http request.
43 urlValues := make(url.Values)
44 urlValues.Set("notification", "")
45
46 notifBytes, err := xml.Marshal(&config)
47 if err != nil {
48 return err
49 }
50
51 notifBuffer := bytes.NewReader(notifBytes)
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: notifBuffer,
56 contentLength: int64(len(notifBytes)),
57 contentMD5Base64: sumMD5Base64(notifBytes),
58 contentSHA256Hex: sum256Hex(notifBytes),
59 }
60
61 // Execute PUT to upload a new bucket notification.
62 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
63 defer closeResponse(resp)
64 if err != nil {
65 return err
66 }
67 if resp != nil {
68 if resp.StatusCode != http.StatusOK {
69 return httpRespToErrorResponse(resp, bucketName, "")
70 }
71 }
72 return nil
73}
74
75// RemoveAllBucketNotification removes the bucket notification, clearing all previously specified configuration.
76func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
77 return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
78}
79
80// GetBucketNotification returns current bucket notification configuration
81func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
82 // Input validation.
83 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
84 return notification.Configuration{}, err
85 }
86 return c.getBucketNotification(ctx, bucketName)
87}
88
89// Request server for notification rules.
90func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
91 urlValues := make(url.Values)
92 urlValues.Set("notification", "")
93
94 // Execute GET on bucket to fetch the notification configuration.
95 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
96 bucketName: bucketName,
97 queryValues: urlValues,
98 contentSHA256Hex: emptySHA256Hex,
99 })
100
101 defer closeResponse(resp)
102 if err != nil {
103 return notification.Configuration{}, err
104 }
105 return processBucketNotificationResponse(bucketName, resp)
106}
107
108// processes the GetNotification http response from the server.
109func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) {
110 if resp.StatusCode != http.StatusOK {
111 errResponse := httpRespToErrorResponse(resp, bucketName, "")
112 return notification.Configuration{}, errResponse
113 }
114 var bucketNotification notification.Configuration
115 err := xmlDecoder(resp.Body, &bucketNotification)
116 if err != nil {
117 return notification.Configuration{}, err
118 }
119 return bucketNotification, nil
120}
121
122// ListenNotification listens for all events; this is a MinIO-specific API.
123func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
124 return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
125}
126
127// ListenBucketNotification listens for bucket events; this is a MinIO-specific API.
128func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
129 notificationInfoCh := make(chan notification.Info, 1)
130 const notificationCapacity = 4 * 1024 * 1024
131 notificationEventBuffer := make([]byte, notificationCapacity)
132 // Only success, start a routine to start reading line by line.
133 go func(notificationInfoCh chan<- notification.Info) {
134 defer close(notificationInfoCh)
135
136 // Validate the bucket name.
137 if bucketName != "" {
138 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
139 select {
140 case notificationInfoCh <- notification.Info{
141 Err: err,
142 }:
143 case <-ctx.Done():
144 }
145 return
146 }
147 }
148
149 // Check ARN partition to verify if listening bucket is supported
150 if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
151 select {
152 case notificationInfoCh <- notification.Info{
153 Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
154 }:
155 case <-ctx.Done():
156 }
157 return
158 }
159
160 // Continuously run and listen on bucket notification.
161 // Create a done channel to control the retry timer goroutine.
162 retryDoneCh := make(chan struct{}, 1)
163
164 // Indicate to our routine to exit cleanly upon return.
165 defer close(retryDoneCh)
166
167 // Prepare urlValues to pass into the request on every loop
168 urlValues := make(url.Values)
169 urlValues.Set("ping", "10")
170 urlValues.Set("prefix", prefix)
171 urlValues.Set("suffix", suffix)
172 urlValues["events"] = events
173
174 // Wait on the jitter retry loop.
175 for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
176 // Execute GET on bucket to listen for notifications.
177 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
178 bucketName: bucketName,
179 queryValues: urlValues,
180 contentSHA256Hex: emptySHA256Hex,
181 })
182 if err != nil {
183 select {
184 case notificationInfoCh <- notification.Info{
185 Err: err,
186 }:
187 case <-ctx.Done():
188 }
189 return
190 }
191
192 // Validate http response, upon error return quickly.
193 if resp.StatusCode != http.StatusOK {
194 errResponse := httpRespToErrorResponse(resp, bucketName, "")
195 select {
196 case notificationInfoCh <- notification.Info{
197 Err: errResponse,
198 }:
199 case <-ctx.Done():
200 }
201 return
202 }
203
204 // Initialize a new bufio scanner, to read line by line.
205 bio := bufio.NewScanner(resp.Body)
206
207 // Use a higher buffer to support unexpected
208 // caching done by proxies
209 bio.Buffer(notificationEventBuffer, notificationCapacity)
210 json := jsoniter.ConfigCompatibleWithStandardLibrary
211
212 // Unmarshal each line, returns marshaled values.
213 for bio.Scan() {
214 var notificationInfo notification.Info
215 if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
216 // Unexpected error during json unmarshal, send
217 // the error to the caller so it can act on it.
218 select {
219 case notificationInfoCh <- notification.Info{
220 Err: err,
221 }:
222 case <-ctx.Done():
223 return
224 }
225 closeResponse(resp)
226 continue
227 }
228
229 // Empty events pinged from the server
230 if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
231 continue
232 }
233
234 // Send notificationInfo
235 select {
236 case notificationInfoCh <- notificationInfo:
237 case <-ctx.Done():
238 closeResponse(resp)
239 return
240 }
241 }
242
243 if err = bio.Err(); err != nil {
244 select {
245 case notificationInfoCh <- notification.Info{
246 Err: err,
247 }:
248 case <-ctx.Done():
249 return
250 }
251 }
252
253 // Close current connection before looping further.
254 closeResponse(resp)
255
256 }
257 }(notificationInfoCh)
258
259 // Returns the notification info channel, for caller to start reading from.
260 return notificationInfoCh
261}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
new file mode 100644
index 0000000..dbb5259
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -0,0 +1,147 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "context"
21 "io"
22 "net/http"
23 "net/url"
24 "strings"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// SetBucketPolicy sets the access permissions on an existing bucket.
30func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return err
34 }
35
36 // If policy is empty then delete the bucket policy.
37 if policy == "" {
38 return c.removeBucketPolicy(ctx, bucketName)
39 }
40
41 // Save the updated policies.
42 return c.putBucketPolicy(ctx, bucketName, policy)
43}
44
45// Saves a new bucket policy.
46func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
47 // Get resources properly escaped and lined up before
48 // using them in http request.
49 urlValues := make(url.Values)
50 urlValues.Set("policy", "")
51
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: strings.NewReader(policy),
56 contentLength: int64(len(policy)),
57 }
58
59 // Execute PUT to upload a new bucket policy.
60 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
61 defer closeResponse(resp)
62 if err != nil {
63 return err
64 }
65 if resp != nil {
66 if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
67 return httpRespToErrorResponse(resp, bucketName, "")
68 }
69 }
70 return nil
71}
72
73// Removes all policies on a bucket.
74func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
75 // Get resources properly escaped and lined up before
76 // using them in http request.
77 urlValues := make(url.Values)
78 urlValues.Set("policy", "")
79
80 // Execute DELETE on objectName.
81 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
82 bucketName: bucketName,
83 queryValues: urlValues,
84 contentSHA256Hex: emptySHA256Hex,
85 })
86 defer closeResponse(resp)
87 if err != nil {
88 return err
89 }
90
91 if resp.StatusCode != http.StatusNoContent {
92 return httpRespToErrorResponse(resp, bucketName, "")
93 }
94
95 return nil
96}
97
98// GetBucketPolicy returns the current policy
99func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
100 // Input validation.
101 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
102 return "", err
103 }
104 bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
105 if err != nil {
106 errResponse := ToErrorResponse(err)
107 if errResponse.Code == "NoSuchBucketPolicy" {
108 return "", nil
109 }
110 return "", err
111 }
112 return bucketPolicy, nil
113}
114
115// Request server for current bucket policy.
116func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
117 // Get resources properly escaped and lined up before
118 // using them in http request.
119 urlValues := make(url.Values)
120 urlValues.Set("policy", "")
121
122 // Execute GET on bucket to fetch the bucket policy.
123 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
124 bucketName: bucketName,
125 queryValues: urlValues,
126 contentSHA256Hex: emptySHA256Hex,
127 })
128
129 defer closeResponse(resp)
130 if err != nil {
131 return "", err
132 }
133
134 if resp != nil {
135 if resp.StatusCode != http.StatusOK {
136 return "", httpRespToErrorResponse(resp, bucketName, "")
137 }
138 }
139
140 bucketPolicyBuf, err := io.ReadAll(resp.Body)
141 if err != nil {
142 return "", err
143 }
144
145 policy := string(bucketPolicyBuf)
146 return policy, err
147}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
new file mode 100644
index 0000000..b12bb13
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
@@ -0,0 +1,355 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/json"
24 "encoding/xml"
25 "io"
26 "net/http"
27 "net/url"
28 "time"
29
30 "github.com/google/uuid"
31 "github.com/minio/minio-go/v7/pkg/replication"
32 "github.com/minio/minio-go/v7/pkg/s3utils"
33)
34
35// RemoveBucketReplication removes a replication config on an existing bucket.
36func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
37 return c.removeBucketReplication(ctx, bucketName)
38}
39
40// SetBucketReplication sets a replication config on an existing bucket.
41func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
42 // Input validation.
43 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
44 return err
45 }
46
47 // If replication is empty then delete it.
48 if cfg.Empty() {
49 return c.removeBucketReplication(ctx, bucketName)
50 }
51 // Save the updated replication.
52 return c.putBucketReplication(ctx, bucketName, cfg)
53}
54
55// Saves a new bucket replication.
56func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
57 // Get resources properly escaped and lined up before
58 // using them in http request.
59 urlValues := make(url.Values)
60 urlValues.Set("replication", "")
61 replication, err := xml.Marshal(cfg)
62 if err != nil {
63 return err
64 }
65
66 reqMetadata := requestMetadata{
67 bucketName: bucketName,
68 queryValues: urlValues,
69 contentBody: bytes.NewReader(replication),
70 contentLength: int64(len(replication)),
71 contentMD5Base64: sumMD5Base64(replication),
72 }
73
74 // Execute PUT to upload a new bucket replication config.
75 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
76 defer closeResponse(resp)
77 if err != nil {
78 return err
79 }
80
81 if resp.StatusCode != http.StatusOK {
82 return httpRespToErrorResponse(resp, bucketName, "")
83 }
84
85 return nil
86}
87
88// Remove replication from a bucket.
89func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
90 // Get resources properly escaped and lined up before
91 // using them in http request.
92 urlValues := make(url.Values)
93 urlValues.Set("replication", "")
94
95 // Execute DELETE on objectName.
96 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
97 bucketName: bucketName,
98 queryValues: urlValues,
99 contentSHA256Hex: emptySHA256Hex,
100 })
101 defer closeResponse(resp)
102 if err != nil {
103 return err
104 }
105 if resp.StatusCode != http.StatusOK {
106 return httpRespToErrorResponse(resp, bucketName, "")
107 }
108 return nil
109}
110
111// GetBucketReplication fetches the bucket replication configuration. If no config is
112// found, it returns an empty config with a nil error.
113func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
114 // Input validation.
115 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
116 return cfg, err
117 }
118 bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
119 if err != nil {
120 errResponse := ToErrorResponse(err)
121 if errResponse.Code == "ReplicationConfigurationNotFoundError" {
122 return cfg, nil
123 }
124 return cfg, err
125 }
126 return bucketReplicationCfg, nil
127}
128
129// Request server for current bucket replication config.
130func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
131 // Get resources properly escaped and lined up before
132 // using them in http request.
133 urlValues := make(url.Values)
134 urlValues.Set("replication", "")
135
136 // Execute GET on bucket to get replication config.
137 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
138 bucketName: bucketName,
139 queryValues: urlValues,
140 })
141
142 defer closeResponse(resp)
143 if err != nil {
144 return cfg, err
145 }
146
147 if resp.StatusCode != http.StatusOK {
148 return cfg, httpRespToErrorResponse(resp, bucketName, "")
149 }
150
151 if err = xmlDecoder(resp.Body, &cfg); err != nil {
152 return cfg, err
153 }
154
155 return cfg, nil
156}
157
158// GetBucketReplicationMetrics fetches bucket replication status metrics
159func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
160 // Input validation.
161 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
162 return s, err
163 }
164 // Get resources properly escaped and lined up before
165 // using them in http request.
166 urlValues := make(url.Values)
167 urlValues.Set("replication-metrics", "")
168
169 // Execute GET on bucket to get replication config.
170 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
171 bucketName: bucketName,
172 queryValues: urlValues,
173 })
174
175 defer closeResponse(resp)
176 if err != nil {
177 return s, err
178 }
179
180 if resp.StatusCode != http.StatusOK {
181 return s, httpRespToErrorResponse(resp, bucketName, "")
182 }
183 respBytes, err := io.ReadAll(resp.Body)
184 if err != nil {
185 return s, err
186 }
187
188 if err := json.Unmarshal(respBytes, &s); err != nil {
189 return s, err
190 }
191 return s, nil
192}
193
194// mustGetUUID - get a random UUID.
195func mustGetUUID() string {
196 u, err := uuid.NewRandom()
197 if err != nil {
198 return ""
199 }
200 return u.String()
201}
202
203// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
204// is enabled in the replication config
205func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
206 rID = mustGetUUID()
207 _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
208 if err != nil {
209 return rID, err
210 }
211 return rID, nil
212}
213
214// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
215// ExistingObjectReplication is enabled in the replication config
216func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
217 return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
218}
219
220// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
221// is enabled in the replication config, using the caller-supplied reset ID.
222func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
223 // Input validation.
224 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
225 return
226 }
227 // Get resources properly escaped and lined up before
228 // using them in http request.
229 urlValues := make(url.Values)
230 urlValues.Set("replication-reset", "")
231 if olderThan > 0 {
232 urlValues.Set("older-than", olderThan.String())
233 }
234 if tgtArn != "" {
235 urlValues.Set("arn", tgtArn)
236 }
237 urlValues.Set("reset-id", resetID)
238 // Execute PUT on bucket to reset replication.
239 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
240 bucketName: bucketName,
241 queryValues: urlValues,
242 })
243
244 defer closeResponse(resp)
245 if err != nil {
246 return rinfo, err
247 }
248
249 if resp.StatusCode != http.StatusOK {
250 return rinfo, httpRespToErrorResponse(resp, bucketName, "")
251 }
252
253 if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
254 return rinfo, err
255 }
256 return rinfo, nil
257}
258
259// GetBucketReplicationResyncStatus gets the status of replication resync
260func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
261 // Input validation.
262 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
263 return rinfo, err
264 }
265 // Get resources properly escaped and lined up before
266 // using them in http request.
267 urlValues := make(url.Values)
268 urlValues.Set("replication-reset-status", "")
269 if arn != "" {
270 urlValues.Set("arn", arn)
271 }
272 // Execute GET on bucket to get replication resync status.
273 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
274 bucketName: bucketName,
275 queryValues: urlValues,
276 })
277
278 defer closeResponse(resp)
279 if err != nil {
280 return rinfo, err
281 }
282
283 if resp.StatusCode != http.StatusOK {
284 return rinfo, httpRespToErrorResponse(resp, bucketName, "")
285 }
286
287 if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
288 return rinfo, err
289 }
290 return rinfo, nil
291}
292
293// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
294func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
295 // Input validation.
296 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
297 return s, err
298 }
299 // Get resources properly escaped and lined up before
300 // using them in http request.
301 urlValues := make(url.Values)
302 urlValues.Set("replication-metrics", "2")
303
304 // Execute GET on bucket to get replication metrics.
305 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
306 bucketName: bucketName,
307 queryValues: urlValues,
308 })
309
310 defer closeResponse(resp)
311 if err != nil {
312 return s, err
313 }
314
315 if resp.StatusCode != http.StatusOK {
316 return s, httpRespToErrorResponse(resp, bucketName, "")
317 }
318 respBytes, err := io.ReadAll(resp.Body)
319 if err != nil {
320 return s, err
321 }
322
323 if err := json.Unmarshal(respBytes, &s); err != nil {
324 return s, err
325 }
326 return s, nil
327}
328
329// CheckBucketReplication validates if replication is set up properly for a bucket
330func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
331 // Input validation.
332 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
333 return err
334 }
335 // Get resources properly escaped and lined up before
336 // using them in http request.
337 urlValues := make(url.Values)
338 urlValues.Set("replication-check", "")
339
340 // Execute GET on bucket to validate replication configuration.
341 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
342 bucketName: bucketName,
343 queryValues: urlValues,
344 })
345
346 defer closeResponse(resp)
347 if err != nil {
348 return err
349 }
350
351 if resp.StatusCode != http.StatusOK {
352 return httpRespToErrorResponse(resp, bucketName, "")
353 }
354 return nil
355}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
new file mode 100644
index 0000000..86d7429
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -0,0 +1,134 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "errors"
24 "io"
25 "net/http"
26 "net/url"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29 "github.com/minio/minio-go/v7/pkg/tags"
30)
31
32// GetBucketTagging fetches the tagging configuration for a bucket with a
33// context to control cancellations and timeouts.
34func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
35 // Input validation.
36 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
37 return nil, err
38 }
39
40 // Get resources properly escaped and lined up before
41 // using them in http request.
42 urlValues := make(url.Values)
43 urlValues.Set("tagging", "")
44
45 // Execute GET on bucket to get tagging configuration.
46 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
47 bucketName: bucketName,
48 queryValues: urlValues,
49 })
50
51 defer closeResponse(resp)
52 if err != nil {
53 return nil, err
54 }
55
56 if resp.StatusCode != http.StatusOK {
57 return nil, httpRespToErrorResponse(resp, bucketName, "")
58 }
59
60 defer io.Copy(io.Discard, resp.Body)
61 return tags.ParseBucketXML(resp.Body)
62}
63
64// SetBucketTagging sets tagging configuration for a bucket
65// with a context to control cancellations and timeouts.
66func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
67 // Input validation.
68 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
69 return err
70 }
71
72 if tags == nil {
73 return errors.New("nil tags passed")
74 }
75
76 buf, err := xml.Marshal(tags)
77 if err != nil {
78 return err
79 }
80
81 // Get resources properly escaped and lined up before
82 // using them in http request.
83 urlValues := make(url.Values)
84 urlValues.Set("tagging", "")
85
86 // Content-length is mandatory to set a tagging configuration
87 reqMetadata := requestMetadata{
88 bucketName: bucketName,
89 queryValues: urlValues,
90 contentBody: bytes.NewReader(buf),
91 contentLength: int64(len(buf)),
92 contentMD5Base64: sumMD5Base64(buf),
93 }
94
95 // Execute PUT on bucket to put tagging configuration.
96 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
102 return httpRespToErrorResponse(resp, bucketName, "")
103 }
104 return nil
105}
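
// Usage sketch (illustrative, not from the upstream docs; the tag values are
// hypothetical and c is assumed to be an initialized *Client):
//
//	t, err := tags.NewTags(map[string]string{"project": "alpha"}, false)
//	if err != nil {
//		// handle error
//	}
//	if err := c.SetBucketTagging(context.Background(), "my-bucket", t); err != nil {
//		// handle error
//	}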
106
107// RemoveBucketTagging removes tagging configuration for a
108// bucket with a context to control cancellations and timeouts.
109func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
110 // Input validation.
111 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
112 return err
113 }
114
115 // Get resources properly escaped and lined up before
116 // using them in http request.
117 urlValues := make(url.Values)
118 urlValues.Set("tagging", "")
119
120 // Execute DELETE on bucket to remove tagging configuration.
121 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
122 bucketName: bucketName,
123 queryValues: urlValues,
124 contentSHA256Hex: emptySHA256Hex,
125 })
126 defer closeResponse(resp)
127 if err != nil {
128 return err
129 }
130 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
131 return httpRespToErrorResponse(resp, bucketName, "")
132 }
133 return nil
134}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
new file mode 100644
index 0000000..8c84e4f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
@@ -0,0 +1,146 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// SetBucketVersioning sets a bucket versioning configuration
30func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return err
34 }
35
36 buf, err := xml.Marshal(config)
37 if err != nil {
38 return err
39 }
40
41 // Get resources properly escaped and lined up before
42 // using them in http request.
43 urlValues := make(url.Values)
44 urlValues.Set("versioning", "")
45
46 reqMetadata := requestMetadata{
47 bucketName: bucketName,
48 queryValues: urlValues,
49 contentBody: bytes.NewReader(buf),
50 contentLength: int64(len(buf)),
51 contentMD5Base64: sumMD5Base64(buf),
52 contentSHA256Hex: sum256Hex(buf),
53 }
54
55 // Execute PUT to set a bucket versioning.
56 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
57 defer closeResponse(resp)
58 if err != nil {
59 return err
60 }
61 if resp != nil {
62 if resp.StatusCode != http.StatusOK {
63 return httpRespToErrorResponse(resp, bucketName, "")
64 }
65 }
66 return nil
67}
68
69// EnableVersioning - enable object versioning in given bucket.
70func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error {
71 return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"})
72}
73
74// SuspendVersioning - suspend object versioning in given bucket.
75func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error {
76 return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"})
77}
78
79// ExcludedPrefix - holds individual prefixes excluded from being versioned.
80type ExcludedPrefix struct {
81 Prefix string
82}
83
84// BucketVersioningConfiguration is the versioning configuration structure
85type BucketVersioningConfiguration struct {
86 XMLName xml.Name `xml:"VersioningConfiguration"`
87 Status string `xml:"Status"`
88 MFADelete string `xml:"MfaDelete,omitempty"`
89 // MinIO extension - allows selective, prefix-level versioning exclusion.
90 // Requires versioning to be enabled
91 ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"`
92 ExcludeFolders bool `xml:",omitempty"`
93}
94
95// Various supported states
96const (
97 Enabled = "Enabled"
98 // Disabled State = "Disabled" only used by MFA Delete not supported yet.
99 Suspended = "Suspended"
100)
101
102// Enabled returns true if bucket versioning is enabled
103func (b BucketVersioningConfiguration) Enabled() bool {
104 return b.Status == Enabled
105}
106
107// Suspended returns true if bucket versioning is suspended
108func (b BucketVersioningConfiguration) Suspended() bool {
109 return b.Status == Suspended
110}
111
112// GetBucketVersioning gets the versioning configuration on
113// an existing bucket with a context to control cancellations and timeouts.
114func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
115 // Input validation.
116 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
117 return BucketVersioningConfiguration{}, err
118 }
119
120 // Get resources properly escaped and lined up before
121 // using them in http request.
122 urlValues := make(url.Values)
123 urlValues.Set("versioning", "")
124
125 // Execute GET on bucket to get the versioning configuration.
126 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
127 bucketName: bucketName,
128 queryValues: urlValues,
129 })
130
131 defer closeResponse(resp)
132 if err != nil {
133 return BucketVersioningConfiguration{}, err
134 }
135
136 if resp.StatusCode != http.StatusOK {
137 return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "")
138 }
139
140 versioningConfig := BucketVersioningConfiguration{}
141 if err = xmlDecoder(resp.Body, &versioningConfig); err != nil {
142 return versioningConfig, err
143 }
144
145 return versioningConfig, nil
146}
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
new file mode 100644
index 0000000..e64a244
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -0,0 +1,594 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017, 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "fmt"
23 "io"
24 "net/http"
25 "net/url"
26 "strconv"
27 "strings"
28 "time"
29
30 "github.com/google/uuid"
31 "github.com/minio/minio-go/v7/pkg/encrypt"
32 "github.com/minio/minio-go/v7/pkg/s3utils"
33)
34
35// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
36type CopyDestOptions struct {
37 Bucket string // points to destination bucket
38 Object string // points to destination object
39
40 // `Encryption` is the key info for server-side-encryption with customer
41 // provided key. If it is nil, no encryption is performed.
42 Encryption encrypt.ServerSide
43
44 // `userMeta` is the user-metadata key-value pairs to be set on the
45 // destination. The keys are automatically prefixed with `x-amz-meta-`
46 // if needed. If nil is passed, and if only a single source (of any
47 // size) is provided in the ComposeObject call, then metadata from the
48 // source is copied to the destination.
49 // if no user-metadata is provided, it is copied from source
50 // (when there is only one source object in the compose
51 // request)
52 UserMetadata map[string]string
53 // UserMetadata is only set on the destination if ReplaceMetadata is true;
54 // otherwise UserMetadata is ignored and src.UserMetadata is preserved.
55 // NOTE: if you set ReplaceMetadata to true and no metadata is present
56 // in UserMetadata, your destination object will not have any metadata
57 // set.
58 ReplaceMetadata bool
59
60 // `userTags` is the user defined object tags to be set on destination.
61 // This will be set only if the `replaceTags` field is set to true.
62 // Otherwise this field is ignored
63 UserTags map[string]string
64 ReplaceTags bool
65
66 // Specifies whether you want to apply a Legal Hold to the copied object.
67 LegalHold LegalHoldStatus
68
69 // Object Retention related fields
70 Mode RetentionMode
71 RetainUntilDate time.Time
72
73 Size int64 // Needs to be specified if progress bar is specified.
74 // Progress of the entire copy operation will be sent here.
75 Progress io.Reader
76}
77
78// Process custom-metadata to remove a `x-amz-meta-` prefix if
79// present and validate that keys are distinct (after this
80// prefix removal).
81func filterCustomMeta(userMeta map[string]string) map[string]string {
82 m := make(map[string]string)
83 for k, v := range userMeta {
84 if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
85 k = k[len("x-amz-meta-"):]
86 }
87 if _, ok := m[k]; ok {
88 continue
89 }
90 m[k] = v
91 }
92 return m
93}
94
95// Marshal converts all the CopyDestOptions into their
96// equivalent HTTP header representation
97func (opts CopyDestOptions) Marshal(header http.Header) {
98 const replaceDirective = "REPLACE"
99 if opts.ReplaceTags {
100 header.Set(amzTaggingHeaderDirective, replaceDirective)
101 if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
102 header.Set(amzTaggingHeader, tags)
103 }
104 }
105
106 if opts.LegalHold != LegalHoldStatus("") {
107 header.Set(amzLegalHoldHeader, opts.LegalHold.String())
108 }
109
110 if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
111 header.Set(amzLockMode, opts.Mode.String())
112 header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
113 }
114
115 if opts.Encryption != nil {
116 opts.Encryption.Marshal(header)
117 }
118
119 if opts.ReplaceMetadata {
120 header.Set("x-amz-metadata-directive", replaceDirective)
121 for k, v := range filterCustomMeta(opts.UserMetadata) {
122 if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
123 header.Set(k, v)
124 } else {
125 header.Set("x-amz-meta-"+k, v)
126 }
127 }
128 }
129}
130
131// validate checks that the CopyDestOptions are well-formed.
132func (opts CopyDestOptions) validate() (err error) {
133 // Input validation.
134 if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
135 return err
136 }
137 if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
138 return err
139 }
140 if opts.Progress != nil && opts.Size < 0 {
141 return errInvalidArgument("For progress bar effective size needs to be specified")
142 }
143 return nil
144}
145
146// CopySrcOptions represents a source object to be copied, using
147// server-side copying APIs.
148type CopySrcOptions struct {
149 Bucket, Object string
150 VersionID string
151 MatchETag string
152 NoMatchETag string
153 MatchModifiedSince time.Time
154 MatchUnmodifiedSince time.Time
155 MatchRange bool
156 Start, End int64
157 Encryption encrypt.ServerSide
158}
159
160// Marshal converts all the CopySrcOptions into their
161// equivalent HTTP header representation
162func (opts CopySrcOptions) Marshal(header http.Header) {
163 // Set the source header
164 header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
165 if opts.VersionID != "" {
166 header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
167 }
168
169 if opts.MatchETag != "" {
170 header.Set("x-amz-copy-source-if-match", opts.MatchETag)
171 }
172 if opts.NoMatchETag != "" {
173 header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
174 }
175
176 if !opts.MatchModifiedSince.IsZero() {
177 header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
178 }
179 if !opts.MatchUnmodifiedSince.IsZero() {
180 header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
181 }
182
183 if opts.Encryption != nil {
184 encrypt.SSECopy(opts.Encryption).Marshal(header)
185 }
186}
187
188func (opts CopySrcOptions) validate() (err error) {
189 // Input validation.
190 if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
191 return err
192 }
193 if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
194 return err
195 }
196 if opts.Start > opts.End || opts.Start < 0 {
197 return errInvalidArgument("start must be non-negative, and start must be at most end.")
198 }
199 return nil
200}
201
202// copyObjectDo - low-level implementation of the CopyObject API; supports copies of up to 5GiB only.
203func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
204 metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
205) (ObjectInfo, error) {
206 // Build headers.
207 headers := make(http.Header)
208
209 // Set all the metadata headers.
210 for k, v := range metadata {
211 headers.Set(k, v)
212 }
213 if !dstOpts.Internal.ReplicationStatus.Empty() {
214 headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
215 }
216 if !dstOpts.Internal.SourceMTime.IsZero() {
217 headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
218 }
219 if dstOpts.Internal.SourceETag != "" {
220 headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
221 }
222 if dstOpts.Internal.ReplicationRequest {
223 headers.Set(minIOBucketReplicationRequest, "true")
224 }
225 if dstOpts.Internal.ReplicationValidityCheck {
226 headers.Set(minIOBucketReplicationCheck, "true")
227 }
228 if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
229 headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
230 }
231 if !dstOpts.Internal.RetentionTimestamp.IsZero() {
232 headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
233 }
234 if !dstOpts.Internal.TaggingTimestamp.IsZero() {
235 headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
236 }
237
238 if len(dstOpts.UserTags) != 0 {
239 headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
240 }
241
242 reqMetadata := requestMetadata{
243 bucketName: destBucket,
244 objectName: destObject,
245 customHeader: headers,
246 }
247 if dstOpts.Internal.SourceVersionID != "" {
248 if dstOpts.Internal.SourceVersionID != nullVersionID {
249 if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
250 return ObjectInfo{}, errInvalidArgument(err.Error())
251 }
252 }
253 urlValues := make(url.Values)
254 urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
255 reqMetadata.queryValues = urlValues
256 }
257
258 // Set the source header
259 headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
260 if srcOpts.VersionID != "" {
261 headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
262 }
263	// Send copy-object request.
264 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
265 defer closeResponse(resp)
266 if err != nil {
267 return ObjectInfo{}, err
268 }
269
270 // Check if we got an error response.
271 if resp.StatusCode != http.StatusOK {
272 return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
273 }
274
275 cpObjRes := copyObjectResult{}
276 err = xmlDecoder(resp.Body, &cpObjRes)
277 if err != nil {
278 return ObjectInfo{}, err
279 }
280
281 objInfo := ObjectInfo{
282 Key: destObject,
283 ETag: strings.Trim(cpObjRes.ETag, "\""),
284 LastModified: cpObjRes.LastModified,
285 }
286 return objInfo, nil
287}
288
289func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
290 partID int, startOffset, length int64, metadata map[string]string,
291) (p CompletePart, err error) {
292 headers := make(http.Header)
293
294 // Set source
295 headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
296
297 if startOffset < 0 {
298 return p, errInvalidArgument("startOffset must be non-negative")
299 }
300
301 if length >= 0 {
302 headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
303 }
304
305 for k, v := range metadata {
306 headers.Set(k, v)
307 }
308
309 queryValues := make(url.Values)
310 queryValues.Set("partNumber", strconv.Itoa(partID))
311 queryValues.Set("uploadId", uploadID)
312
313 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
314 bucketName: destBucket,
315 objectName: destObject,
316 customHeader: headers,
317 queryValues: queryValues,
318 })
319 defer closeResponse(resp)
320 if err != nil {
321 return
322 }
323
324 // Check if we got an error response.
325 if resp.StatusCode != http.StatusOK {
326 return p, httpRespToErrorResponse(resp, destBucket, destObject)
327 }
328
329 // Decode copy-part response on success.
330 cpObjRes := copyObjectResult{}
331 err = xmlDecoder(resp.Body, &cpObjRes)
332 if err != nil {
333 return p, err
334 }
335 p.PartNumber, p.ETag = partID, cpObjRes.ETag
336 return p, nil
337}
338
339// uploadPartCopy - helper function to create a part in a multipart
340// upload via an upload-part-copy request
341// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
342func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
343 headers http.Header,
344) (p CompletePart, err error) {
345 // Build query parameters
346 urlValues := make(url.Values)
347 urlValues.Set("partNumber", strconv.Itoa(partNumber))
348 urlValues.Set("uploadId", uploadID)
349
350 // Send upload-part-copy request
351 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
352 bucketName: bucket,
353 objectName: object,
354 customHeader: headers,
355 queryValues: urlValues,
356 })
357 defer closeResponse(resp)
358 if err != nil {
359 return p, err
360 }
361
362 // Check if we got an error response.
363 if resp.StatusCode != http.StatusOK {
364 return p, httpRespToErrorResponse(resp, bucket, object)
365 }
366
367 // Decode copy-part response on success.
368 cpObjRes := copyObjectResult{}
369 err = xmlDecoder(resp.Body, &cpObjRes)
370 if err != nil {
371 return p, err
372 }
373 p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
374 return p, nil
375}
376
377// ComposeObject - creates an object using server-side copying
378// of existing objects. It takes a list of source objects (with optional offsets)
379// and concatenates them into a new object using only server-side copying
380// operations. Optionally takes progress reader hook for applications to
381// look at current progress.
382func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
383 if len(srcs) < 1 || len(srcs) > maxPartsCount {
384 return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
385 }
386
387 for _, src := range srcs {
388 if err := src.validate(); err != nil {
389 return UploadInfo{}, err
390 }
391 }
392
393 if err := dst.validate(); err != nil {
394 return UploadInfo{}, err
395 }
396
397 srcObjectInfos := make([]ObjectInfo, len(srcs))
398 srcObjectSizes := make([]int64, len(srcs))
399 var totalSize, totalParts int64
400 var err error
401 for i, src := range srcs {
402 opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
403 srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
404 if err != nil {
405 return UploadInfo{}, err
406 }
407
408 srcCopySize := srcObjectInfos[i].Size
409 // Check if a segment is specified, and if so, is the
410 // segment within object bounds?
411 if src.MatchRange {
412			// A range is specified, and validate() already
413			// ensures 0 <= src.Start <= src.End, so the
414			// invalid cases left to check are:
415 if src.End >= srcCopySize || src.Start < 0 {
416 return UploadInfo{}, errInvalidArgument(
417 fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
418 i, src.Start, src.End, srcCopySize))
419 }
420 srcCopySize = src.End - src.Start + 1
421 }
422
423 // Only the last source may be less than `absMinPartSize`
424 if srcCopySize < absMinPartSize && i < len(srcs)-1 {
425 return UploadInfo{}, errInvalidArgument(
426 fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
427 }
428
429 // Is data to copy too large?
430 totalSize += srcCopySize
431 if totalSize > maxMultipartPutObjectSize {
432 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
433 }
434
435 // record source size
436 srcObjectSizes[i] = srcCopySize
437
438 // calculate parts needed for current source
439 totalParts += partsRequired(srcCopySize)
440 // Do we need more parts than we are allowed?
441 if totalParts > maxPartsCount {
442 return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
443 "Your proposed compose object requires more than %d parts", maxPartsCount))
444 }
445 }
446
447	// Single source object case (i.e. when only one source is
448	// involved, it is copied wholly, and it is at most 5GiB in
449	// size; empty objects are also supported).
450 if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
451 return c.CopyObject(ctx, dst, srcs[0])
452 }
453
454 // Now, handle multipart-copy cases.
455
456 // 1. Ensure that the object has not been changed while
457 // we are copying data.
458	for i := range srcs {
459		srcs[i].MatchETag = srcObjectInfos[i].ETag // assign to the slice element, not a loop copy
460	}
461
462 // 2. Initiate a new multipart upload.
463
464	// Set user-metadata on the destination object. If no
465	// replacement metadata is specified, the metadata of the
466	// first source object is copied instead.
467 var userMeta map[string]string
468 if dst.ReplaceMetadata {
469 userMeta = dst.UserMetadata
470 } else {
471 userMeta = srcObjectInfos[0].UserMetadata
472 }
473
474 var userTags map[string]string
475 if dst.ReplaceTags {
476 userTags = dst.UserTags
477 } else {
478 userTags = srcObjectInfos[0].UserTags
479 }
480
481 uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
482 ServerSideEncryption: dst.Encryption,
483 UserMetadata: userMeta,
484 UserTags: userTags,
485 Mode: dst.Mode,
486 RetainUntilDate: dst.RetainUntilDate,
487 LegalHold: dst.LegalHold,
488 })
489 if err != nil {
490 return UploadInfo{}, err
491 }
492
493 // 3. Perform copy part uploads
494 objParts := []CompletePart{}
495 partIndex := 1
496 for i, src := range srcs {
497 h := make(http.Header)
498 src.Marshal(h)
499 if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
500 dst.Encryption.Marshal(h)
501 }
502
503 // calculate start/end indices of parts after
504 // splitting.
505 startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
506 for j, start := range startIdx {
507 end := endIdx[j]
508
509 // Add (or reset) source range header for
510 // upload part copy request.
511 h.Set("x-amz-copy-source-range",
512 fmt.Sprintf("bytes=%d-%d", start, end))
513
514 // make upload-part-copy request
515 complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
516 dst.Object, uploadID, partIndex, h)
517 if err != nil {
518 return UploadInfo{}, err
519 }
520 if dst.Progress != nil {
521 io.CopyN(io.Discard, dst.Progress, end-start+1)
522 }
523 objParts = append(objParts, complPart)
524 partIndex++
525 }
526 }
527
528 // 4. Make final complete-multipart request.
529 uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
530 completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
531 if err != nil {
532 return UploadInfo{}, err
533 }
534
535 uploadInfo.Size = totalSize
536 return uploadInfo, nil
537}
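// Editorial note: a minimal, hypothetical usage sketch of ComposeObject
// (not part of the vendored file; assumes an initialized *Client named c,
// and placeholder bucket/object names):
//
//	dst := CopyDestOptions{Bucket: "dst-bucket", Object: "combined"}
//	src1 := CopySrcOptions{Bucket: "src-bucket", Object: "part-a"}
//	src2 := CopySrcOptions{Bucket: "src-bucket", Object: "part-b"}
//	info, err := c.ComposeObject(context.Background(), dst, src1, src2)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	log.Printf("composed %s, %d bytes", info.Key, info.Size)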
538
539// partsRequired computes the number of parts needed to copy size
540// bytes, using a part size of maxMultipartPutObjectSize / (maxPartsCount - 1).
541func partsRequired(size int64) int64 {
542 maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
543 r := size / int64(maxPartSize)
544 if size%int64(maxPartSize) > 0 {
545 r++
546 }
547 return r
548}
549
550// calculateEvenSplits - computes splits for a source and returns
551// start and end index slices. Splits happen evenly to be sure that no
552// part is less than 5MiB, as that could fail the multipart request if
553// it is not the last part.
554func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
555 if size == 0 {
556 return
557 }
558
559 reqParts := partsRequired(size)
560 startIndex = make([]int64, reqParts)
561 endIndex = make([]int64, reqParts)
562 // Compute number of required parts `k`, as:
563 //
564 // k = ceiling(size / copyPartSize)
565 //
566 // Now, distribute the `size` bytes in the source into
567 // k parts as evenly as possible:
568 //
569 // r parts sized (q+1) bytes, and
570 // (k - r) parts sized q bytes, where
571 //
572 // size = q * k + r (by simple division of size by k,
573 // so that 0 <= r < k)
574 //
575 start := src.Start
576 if start == -1 {
577 start = 0
578 }
579 quot, rem := size/reqParts, size%reqParts
580 nextStart := start
581 for j := int64(0); j < reqParts; j++ {
582 curPartSize := quot
583 if j < rem {
584 curPartSize++
585 }
586
587 cStart := nextStart
588 cEnd := cStart + curPartSize - 1
589 nextStart = cEnd + 1
590
591 startIndex[j], endIndex[j] = cStart, cEnd
592 }
593 return
594}
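// Editorial worked example of the even-split arithmetic above, with
// deliberately tiny numbers (real parts are hundreds of MiB): for
// size = 10 and reqParts = 3, quot = 3 and rem = 1, yielding
//
//	part 0: [0, 3] (quot+1 = 4 bytes, since j < rem)
//	part 1: [4, 6] (3 bytes)
//	part 2: [7, 9] (3 bytes)
//
// which covers all 10 bytes with part sizes differing by at most one.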
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
new file mode 100644
index 0000000..0c95d91
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
@@ -0,0 +1,76 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017, 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "net/http"
24)
25
26// CopyObject - copy a source object into a new object
27func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
28 if err := src.validate(); err != nil {
29 return UploadInfo{}, err
30 }
31
32 if err := dst.validate(); err != nil {
33 return UploadInfo{}, err
34 }
35
36 header := make(http.Header)
37 dst.Marshal(header)
38 src.Marshal(header)
39
40 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
41 bucketName: dst.Bucket,
42 objectName: dst.Object,
43 customHeader: header,
44 })
45 if err != nil {
46 return UploadInfo{}, err
47 }
48 defer closeResponse(resp)
49
50 if resp.StatusCode != http.StatusOK {
51 return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object)
52 }
53
54 // Update the progress properly after successful copy.
55 if dst.Progress != nil {
56 io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
57 }
58
59 cpObjRes := copyObjectResult{}
60 if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
61 return UploadInfo{}, err
62 }
63
64 // extract lifecycle expiry date and rule ID
65 expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
66
67 return UploadInfo{
68 Bucket: dst.Bucket,
69 Key: dst.Object,
70 LastModified: cpObjRes.LastModified,
71 ETag: trimEtag(resp.Header.Get("ETag")),
72 VersionID: resp.Header.Get(amzVersionID),
73 Expiration: expTime,
74 ExpirationRuleID: ruleID,
75 }, nil
76}
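// Editorial note: a minimal, hypothetical usage sketch of CopyObject
// (not part of the vendored file; assumes an initialized *Client named c,
// and placeholder bucket/object names):
//
//	src := CopySrcOptions{Bucket: "src-bucket", Object: "src-object"}
//	dst := CopyDestOptions{Bucket: "dst-bucket", Object: "dst-object"}
//	info, err := c.CopyObject(context.Background(), dst, src)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	log.Println("copied; new ETag:", info.ETag)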
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
new file mode 100644
index 0000000..97a6f80
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -0,0 +1,254 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/xml"
22 "io"
23 "net/http"
24 "net/url"
25 "strings"
26 "time"
27)
28
29// BucketInfo container for bucket metadata.
30type BucketInfo struct {
31 // The name of the bucket.
32 Name string `json:"name"`
33 // Date the bucket was created.
34 CreationDate time.Time `json:"creationDate"`
35}
36
37// StringMap represents a map with custom UnmarshalXML
38type StringMap map[string]string
39
40// UnmarshalXML unmarshals the XML into a map of string to strings,
41// creating a key in the map for each tag and setting its value to the
42// tag's contents.
43//
44// The fact that this method is defined on a pointer to StringMap is
45// important: if m is nil it can be initialized, which is often the case
46// when m is nested in another XML structure. This is also why the map is
47// initialized on the first line.
48func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
49 *m = StringMap{}
50 for {
51 // Format is <key>value</key>
52 var e struct {
53 XMLName xml.Name
54 Value string `xml:",chardata"`
55 }
56 err := d.Decode(&e)
57 if err == io.EOF {
58 break
59 }
60 if err != nil {
61 return err
62 }
63 (*m)[e.XMLName.Local] = e.Value
64 }
65 return nil
66}
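// Editorial sketch: the decoder above turns an element whose children are
// simple <key>value</key> pairs, e.g.
//
//	<UserMetadata><color>red</color><size>large</size></UserMetadata>
//
// into StringMap{"color": "red", "size": "large"} (element names are
// hypothetical; any enclosing element works).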
67
68// URLMap represents a map with custom UnmarshalXML
69type URLMap map[string]string
70
71// UnmarshalXML unmarshals the element's character data as a URL-encoded
72// query string ("k1=v1&k2=v2") into the map, creating a key for each
73// pair.
74//
75// The fact that this method is defined on a pointer to URLMap is
76// important: if m is nil it can be initialized, which is often the case
77// when m is nested in another XML structure. This is also why the map is
78// initialized on the first line.
79func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
80 *m = URLMap{}
81 var tgs string
82 if err := d.DecodeElement(&tgs, &se); err != nil {
83 if err == io.EOF {
84 return nil
85 }
86 return err
87 }
88 for tgs != "" {
89 var key string
90 key, tgs, _ = stringsCut(tgs, "&")
91 if key == "" {
92 continue
93 }
94 key, value, _ := stringsCut(key, "=")
95 key, err := url.QueryUnescape(key)
96 if err != nil {
97 return err
98 }
99
100 value, err = url.QueryUnescape(value)
101 if err != nil {
102 return err
103 }
104 (*m)[key] = value
105 }
106 return nil
107}
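// Editorial sketch: the decoder above reads the element's character data
// as a URL-encoded query string, so an element such as
//
//	<UserTags>project=alpha&amp;owner=jane%20doe</UserTags>
//
// decodes into URLMap{"project": "alpha", "owner": "jane doe"}
// (tag names and values are hypothetical).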
108
109// stringsCut slices s around the first instance of sep,
110// returning the text before and after sep; it mirrors
111// strings.Cut from Go 1.18. The found result reports whether
112// sep appears in s. If sep does not appear in s, stringsCut returns s, "", false.
113func stringsCut(s, sep string) (before, after string, found bool) {
114 if i := strings.Index(s, sep); i >= 0 {
115 return s[:i], s[i+len(sep):], true
116 }
117 return s, "", false
118}
119
120// Owner - the owner of a bucket or object.
121type Owner struct {
122	XMLName     xml.Name `xml:"Owner" json:"owner"`
123	DisplayName string   `xml:"DisplayName" json:"name"`
124	ID          string   `xml:"ID" json:"id"`
125}
126
127// UploadInfo contains information about the
128// newly uploaded or copied object.
129type UploadInfo struct {
130 Bucket string
131 Key string
132 ETag string
133 Size int64
134 LastModified time.Time
135 Location string
136 VersionID string
137
138 // Lifecycle expiry-date and ruleID associated with the expiry
139 // not to be confused with `Expires` HTTP header.
140 Expiration time.Time
141 ExpirationRuleID string
142
143 // Verified checksum values, if any.
144 // Values are base64 (standard) encoded.
145 // For multipart objects this is a checksum of the checksum of each part.
146 ChecksumCRC32 string
147 ChecksumCRC32C string
148 ChecksumSHA1 string
149 ChecksumSHA256 string
150}
151
152// RestoreInfo contains information about the restore operation of an archived object
153type RestoreInfo struct {
154	// Whether the restore operation is still ongoing
155 OngoingRestore bool
156 // When the restored copy of the archived object will be removed
157 ExpiryTime time.Time
158}
159
160// ObjectInfo container for object metadata.
161type ObjectInfo struct {
162	// An ETag is optionally set to the md5sum of an object. For multipart objects,
163	// the ETag is of the form MD5SUM-N, where MD5SUM is the md5sum of the
164	// concatenation of the md5sums of the individual parts and N is the part count.
165 ETag string `json:"etag"`
166
167 Key string `json:"name"` // Name of the object
168 LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
169 Size int64 `json:"size"` // Size in bytes of the object.
170 ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
171 Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
172
173 // Collection of additional metadata on the object.
174 // eg: x-amz-meta-*, content-encoding etc.
175 Metadata http.Header `json:"metadata" xml:"-"`
176
177	// x-amz-meta-* headers with the "x-amz-meta-" prefix stripped; contains the first value only.
178 // Only returned by MinIO servers.
179 UserMetadata StringMap `json:"userMetadata,omitempty"`
180
181	// x-amz-tagging values as key/value pairs.
182 // Only returned by MinIO servers.
183 UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
184
185 // x-amz-tagging-count value
186 UserTagCount int
187
188 // Owner name.
189 Owner Owner
190
191 // ACL grant.
192 Grant []Grant
193
194 // The class of storage used to store the object.
195 StorageClass string `json:"storageClass"`
196
197 // Versioning related information
198 IsLatest bool
199 IsDeleteMarker bool
200 VersionID string `xml:"VersionId"`
201
202 // x-amz-replication-status value is either in one of the following states
203 // - COMPLETED
204 // - PENDING
205 // - FAILED
206 // - REPLICA (on the destination)
207 ReplicationStatus string `xml:"ReplicationStatus"`
208	// Set to true if the delete marker has a backing object version on the target and is eligible for replication.
209 ReplicationReady bool
210 // Lifecycle expiry-date and ruleID associated with the expiry
211 // not to be confused with `Expires` HTTP header.
212 Expiration time.Time
213 ExpirationRuleID string
214
215 Restore *RestoreInfo
216
217 // Checksum values
218 ChecksumCRC32 string
219 ChecksumCRC32C string
220 ChecksumSHA1 string
221 ChecksumSHA256 string
222
223 Internal *struct {
224 K int // Data blocks
225 M int // Parity blocks
226 } `xml:"Internal"`
227
228 // Error
229 Err error `json:"-"`
230}
231
232// ObjectMultipartInfo container for multipart object metadata.
233type ObjectMultipartInfo struct {
234 // Date and time at which the multipart upload was initiated.
235 Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
236
237 Initiator initiator
238 Owner owner
239
240 // The type of storage to use for the object. Defaults to 'STANDARD'.
241 StorageClass string
242
243 // Key of the object for which the multipart upload was initiated.
244 Key string
245
246 // Size in bytes of the object.
247 Size int64
248
249 // Upload ID that identifies the multipart upload.
250 UploadID string `xml:"UploadId"`
251
252 // Error
253 Err error
254}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
new file mode 100644
index 0000000..7df211f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -0,0 +1,284 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25 "net/http"
26 "strings"
27)
28
29/* **** SAMPLE ERROR RESPONSE ****
30<?xml version="1.0" encoding="UTF-8"?>
31<Error>
32 <Code>AccessDenied</Code>
33 <Message>Access Denied</Message>
34 <BucketName>bucketName</BucketName>
35 <Key>objectName</Key>
36 <RequestId>F19772218238A85A</RequestId>
37 <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
38</Error>
39*/
40
41// ErrorResponse - the typed error returned by all API operations.
42// The ErrorResponse struct should be comparable since it is compared
43// inside the Go net/http package (https://github.com/golang/go/issues/29768)
44type ErrorResponse struct {
45 XMLName xml.Name `xml:"Error" json:"-"`
46 Code string
47 Message string
48 BucketName string
49 Key string
50 Resource string
51 RequestID string `xml:"RequestId"`
52 HostID string `xml:"HostId"`
53
54 // Region where the bucket is located. This header is returned
55 // only in HEAD bucket and ListObjects response.
56 Region string
57
58 // Captures the server string returned in response header.
59 Server string
60
61 // Underlying HTTP status code for the returned error
62 StatusCode int `xml:"-" json:"-"`
63}
64
65// ToErrorResponse - Returns parsed ErrorResponse struct from body and
66// http headers.
67//
68// For example:
69//
70// import s3 "github.com/minio/minio-go/v7"
71// ...
72// ...
73// reader, stat, err := s3.GetObject(...)
74// if err != nil {
75// resp := s3.ToErrorResponse(err)
76// }
77// ...
78func ToErrorResponse(err error) ErrorResponse {
79 switch err := err.(type) {
80 case ErrorResponse:
81 return err
82 default:
83 return ErrorResponse{}
84 }
85}
86
87// Error - Returns S3 error string.
88func (e ErrorResponse) Error() string {
89 if e.Message == "" {
90 msg, ok := s3ErrorResponseMap[e.Code]
91 if !ok {
92 msg = fmt.Sprintf("Error response code %s.", e.Code)
93 }
94 return msg
95 }
96 return e.Message
97}
98
99// Common string for errors to report issue location in unexpected
100// cases.
101const (
102 reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
103)
104
105// xmlDecodeAndBody reads the whole body up to 1MB and
106// tries to XML decode it into v.
107// The body that was read and any error from reading or decoding is returned.
108func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
109 // read the whole body (up to 1MB)
110 const maxBodyLength = 1 << 20
111 body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
112 if err != nil {
113 return nil, err
114 }
115 return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
116}
117
118// httpRespToErrorResponse returns a new encoded ErrorResponse
119// structure as error.
120func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
121 if resp == nil {
122 msg := "Empty http response. " + reportIssue
123 return errInvalidArgument(msg)
124 }
125
126 errResp := ErrorResponse{
127 StatusCode: resp.StatusCode,
128 Server: resp.Header.Get("Server"),
129 }
130
131 errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
132	// XML decoding failed or the body was empty; fall back to the HTTP status code and headers.
133 if err != nil {
134 switch resp.StatusCode {
135 case http.StatusNotFound:
136 if objectName == "" {
137 errResp = ErrorResponse{
138 StatusCode: resp.StatusCode,
139 Code: "NoSuchBucket",
140 Message: "The specified bucket does not exist.",
141 BucketName: bucketName,
142 }
143 } else {
144 errResp = ErrorResponse{
145 StatusCode: resp.StatusCode,
146 Code: "NoSuchKey",
147 Message: "The specified key does not exist.",
148 BucketName: bucketName,
149 Key: objectName,
150 }
151 }
152 case http.StatusForbidden:
153 errResp = ErrorResponse{
154 StatusCode: resp.StatusCode,
155 Code: "AccessDenied",
156 Message: "Access Denied.",
157 BucketName: bucketName,
158 Key: objectName,
159 }
160 case http.StatusConflict:
161 errResp = ErrorResponse{
162 StatusCode: resp.StatusCode,
163 Code: "Conflict",
164 Message: "Bucket not empty.",
165 BucketName: bucketName,
166 }
167 case http.StatusPreconditionFailed:
168 errResp = ErrorResponse{
169 StatusCode: resp.StatusCode,
170 Code: "PreconditionFailed",
171 Message: s3ErrorResponseMap["PreconditionFailed"],
172 BucketName: bucketName,
173 Key: objectName,
174 }
175 default:
176 msg := resp.Status
177 if len(errBody) > 0 {
178 msg = string(errBody)
179 if len(msg) > 1024 {
180 msg = msg[:1024] + "..."
181 }
182 }
183 errResp = ErrorResponse{
184 StatusCode: resp.StatusCode,
185 Code: resp.Status,
186 Message: msg,
187 BucketName: bucketName,
188 }
189 }
190 }
191
192 code := resp.Header.Get("x-minio-error-code")
193 if code != "" {
194 errResp.Code = code
195 }
196 desc := resp.Header.Get("x-minio-error-desc")
197 if desc != "" {
198 errResp.Message = strings.Trim(desc, `"`)
199 }
200
201 // Save hostID, requestID and region information
202 // from headers if not available through error XML.
203 if errResp.RequestID == "" {
204 errResp.RequestID = resp.Header.Get("x-amz-request-id")
205 }
206 if errResp.HostID == "" {
207 errResp.HostID = resp.Header.Get("x-amz-id-2")
208 }
209 if errResp.Region == "" {
210 errResp.Region = resp.Header.Get("x-amz-bucket-region")
211 }
212 if errResp.Code == "InvalidRegion" && errResp.Region != "" {
213 errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
214 }
215
216 return errResp
217}
218
219// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
220func errTransferAccelerationBucket(bucketName string) error {
221 return ErrorResponse{
222 StatusCode: http.StatusBadRequest,
223 Code: "InvalidArgument",
224 Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
225 BucketName: bucketName,
226 }
227}
228
229// errEntityTooLarge - Input size is larger than supported maximum.
230func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
231 msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
232 return ErrorResponse{
233 StatusCode: http.StatusBadRequest,
234 Code: "EntityTooLarge",
235 Message: msg,
236 BucketName: bucketName,
237 Key: objectName,
238 }
239}
240
241// errEntityTooSmall - Input size is smaller than supported minimum.
242func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
243 msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
244 return ErrorResponse{
245 StatusCode: http.StatusBadRequest,
246 Code: "EntityTooSmall",
247 Message: msg,
248 BucketName: bucketName,
249 Key: objectName,
250 }
251}
252
253// errUnexpectedEOF - Unexpected end of file reached.
254func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
255 msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
256 return ErrorResponse{
257 StatusCode: http.StatusBadRequest,
258 Code: "UnexpectedEOF",
259 Message: msg,
260 BucketName: bucketName,
261 Key: objectName,
262 }
263}
264
265// errInvalidArgument - Invalid argument response.
266func errInvalidArgument(message string) error {
267 return ErrorResponse{
268 StatusCode: http.StatusBadRequest,
269 Code: "InvalidArgument",
270 Message: message,
271 RequestID: "minio",
272 }
273}
274
275// errAPINotSupported - API not supported response
276// The specified API call is not supported
277func errAPINotSupported(message string) error {
278 return ErrorResponse{
279 StatusCode: http.StatusNotImplemented,
280 Code: "APINotSupported",
281 Message: message,
282 RequestID: "minio",
283 }
284}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
new file mode 100644
index 0000000..9041d99
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
@@ -0,0 +1,152 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25)
26
27// Grantee represents the person being granted permissions.
28type Grantee struct {
29 XMLName xml.Name `xml:"Grantee"`
30 ID string `xml:"ID"`
31 DisplayName string `xml:"DisplayName"`
32 URI string `xml:"URI"`
33}
34
35// Grant holds grant information
36type Grant struct {
37 XMLName xml.Name `xml:"Grant"`
38 Grantee Grantee
39 Permission string `xml:"Permission"`
40}
41
42// AccessControlList contains the set of grantees and the permissions assigned to each grantee.
43type AccessControlList struct {
44 XMLName xml.Name `xml:"AccessControlList"`
45 Grant []Grant
46 Permission string `xml:"Permission"`
47}
48
49type accessControlPolicy struct {
50 XMLName xml.Name `xml:"AccessControlPolicy"`
51 Owner Owner
52 AccessControlList AccessControlList
53}
54
55// GetObjectACL - gets the ACL of an object.
56func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
57 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
58 bucketName: bucketName,
59 objectName: objectName,
60 queryValues: url.Values{
61 "acl": []string{""},
62 },
63 })
64 if err != nil {
65 return nil, err
66 }
67 defer closeResponse(resp)
68
69 if resp.StatusCode != http.StatusOK {
70 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
71 }
72
73 res := &accessControlPolicy{}
74
75 if err := xmlDecoder(resp.Body, res); err != nil {
76 return nil, err
77 }
78
79 objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{})
80 if err != nil {
81 return nil, err
82 }
83
84 objInfo.Owner.DisplayName = res.Owner.DisplayName
85 objInfo.Owner.ID = res.Owner.ID
86
87 objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...)
88
89 cannedACL := getCannedACL(res)
90 if cannedACL != "" {
91 objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
92 return &objInfo, nil
93 }
94
95 grantACL := getAmzGrantACL(res)
96 for k, v := range grantACL {
97 objInfo.Metadata[k] = v
98 }
99
100 return &objInfo, nil
101}
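// Editorial note: a minimal, hypothetical usage sketch of GetObjectACL
// (not part of the vendored file; assumes an initialized *Client named c,
// and placeholder names):
//
//	info, err := c.GetObjectACL(context.Background(), "my-bucket", "my-object")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	// Set only when the grants map to a canned ACL such as "private".
//	log.Println("canned ACL:", info.Metadata.Get("X-Amz-Acl"))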
102
103func getCannedACL(aCPolicy *accessControlPolicy) string {
104 grants := aCPolicy.AccessControlList.Grant
105
106 switch {
107 case len(grants) == 1:
108 if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
109 return "private"
110 }
111 case len(grants) == 2:
112 for _, g := range grants {
113 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
114 return "authenticated-read"
115 }
116 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
117 return "public-read"
118 }
119 if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
120 return "bucket-owner-read"
121 }
122 }
123 case len(grants) == 3:
124 for _, g := range grants {
125 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
126 return "public-read-write"
127 }
128 }
129 }
130 return ""
131}
132
133func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
134 grants := aCPolicy.AccessControlList.Grant
135 res := map[string][]string{}
136
137 for _, g := range grants {
138 switch {
139 case g.Permission == "READ":
140 res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
141 case g.Permission == "WRITE":
142 res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
143 case g.Permission == "READ_ACP":
144 res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
145 case g.Permission == "WRITE_ACP":
146 res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
147 case g.Permission == "FULL_CONTROL":
148 res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
149 }
150 }
151 return res
152}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
new file mode 100644
index 0000000..2332dbf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
@@ -0,0 +1,127 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "os"
24 "path/filepath"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// FGetObject - download contents of an object to a local file.
30// The options can be used to specify the GET request further.
31func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
32 // Input validation.
33 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
34 return err
35 }
36 if err := s3utils.CheckValidObjectName(objectName); err != nil {
37 return err
38 }
39
40 // Verify if destination already exists.
41 st, err := os.Stat(filePath)
42 if err == nil {
43 // If the destination exists and is a directory.
44 if st.IsDir() {
45 return errInvalidArgument("fileName is a directory.")
46 }
47 }
48
49	// Proceed if the file does not exist; return all other errors.
50 if err != nil {
51 if !os.IsNotExist(err) {
52 return err
53 }
54 }
55
56 // Extract top level directory.
57 objectDir, _ := filepath.Split(filePath)
58 if objectDir != "" {
59 // Create any missing top level directories.
60 if err := os.MkdirAll(objectDir, 0o700); err != nil {
61 return err
62 }
63 }
64
65	// Stat the object to learn its size and ETag.
66 objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
67 if err != nil {
68 return err
69 }
70
71	// Write to a temporary file "<filePath><ETag>.part.minio" before saving.
72 filePartPath := filePath + objectStat.ETag + ".part.minio"
73
74 // If exists, open in append mode. If not create it as a part file.
75 filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
76 if err != nil {
77 return err
78 }
79
80 // If we return early with an error, be sure to close and delete
81 // filePart. If we have an error along the way there is a chance
82 // that filePart is somehow damaged, and we should discard it.
83 closeAndRemove := true
84 defer func() {
85 if closeAndRemove {
86 _ = filePart.Close()
87 _ = os.Remove(filePartPath)
88 }
89 }()
90
91 // Issue Stat to get the current offset.
92 st, err = filePart.Stat()
93 if err != nil {
94 return err
95 }
96
97 // Initialize get object request headers to set the
98 // appropriate range offsets to read from.
99 if st.Size() > 0 {
100 opts.SetRange(st.Size(), 0)
101 }
102
103 // Seek to current position for incoming reader.
104 objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
105 if err != nil {
106 return err
107 }
108
109 // Write to the part file.
110 if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
111 return err
112 }
113
114	// Close the file before rename; this is specifically needed for Windows users.
115 closeAndRemove = false
116 if err = filePart.Close(); err != nil {
117 return err
118 }
119
120 // Safely completed. Now commit by renaming to actual filename.
121 if err = os.Rename(filePartPath, filePath); err != nil {
122 return err
123 }
124
125 // Return.
126 return nil
127}
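// Editorial note: a minimal, hypothetical usage sketch of FGetObject
// (not part of the vendored file; assumes an initialized *Client named c
// and placeholder names). Thanks to the ".part.minio" scheme above, an
// interrupted download resumes where it left off on the next call:
//
//	err := c.FGetObject(context.Background(), "my-bucket", "my-object",
//		"/tmp/my-object", GetObjectOptions{})
//	if err != nil {
//		log.Fatalln(err)
//	}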
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
new file mode 100644
index 0000000..9e6b154
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -0,0 +1,683 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "errors"
23 "fmt"
24 "io"
25 "net/http"
26 "sync"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29)
30
31// GetObject wrapper function that accepts a request context
32func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
33 // Input validation.
34 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
35 return nil, err
36 }
37 if err := s3utils.CheckValidObjectName(objectName); err != nil {
38 return nil, err
39 }
40
41 gctx, cancel := context.WithCancel(ctx)
42
43	// Detect if the server location we are talking to is "snowball".
44 var snowball bool
45 if location, ok := c.bucketLocCache.Get(bucketName); ok {
46 snowball = location == "snowball"
47 }
48
49 var (
50 err error
51 httpReader io.ReadCloser
52 objectInfo ObjectInfo
53 totalRead int
54 )
55
56 // Create request channel.
57 reqCh := make(chan getRequest)
58 // Create response channel.
59 resCh := make(chan getResponse)
60
61 // This routine feeds partial object data as and when the caller reads.
62 go func() {
63 defer close(resCh)
64 defer func() {
65 // Close the http response body before returning.
66 // This ends the connection with the server.
67 if httpReader != nil {
68 httpReader.Close()
69 }
70 }()
71 defer cancel()
72
73 // Used to verify if etag of object has changed since last read.
74 var etag string
75
76 for req := range reqCh {
77 // If this is the first request we may not need to do a getObject request yet.
78 if req.isFirstReq {
79 // First request is a Read/ReadAt.
80 if req.isReadOp {
81 // Differentiate between wanting the whole object and just a range.
82 if req.isReadAt {
83 // If this is a ReadAt request only get the specified range.
84 // Range is set with respect to the offset and length of the buffer requested.
85 // Do not set objectInfo from the first readAt request because it will not get
86 // the whole object.
87 opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
88 } else if req.Offset > 0 {
89 opts.SetRange(req.Offset, 0)
90 }
91 httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
92 if err != nil {
93 resCh <- getResponse{Error: err}
94 return
95 }
96 etag = objectInfo.ETag
97				// Read at least len(req.Buffer) bytes; if we
98				// cannot, we have reached EOF.
99 size, err := readFull(httpReader, req.Buffer)
100 totalRead += size
101 if size > 0 && err == io.ErrUnexpectedEOF {
102 if int64(size) < objectInfo.Size {
103 // In situations when returned size
104 // is less than the expected content
105 // length set by the server, make sure
106 // we return io.ErrUnexpectedEOF
107 err = io.ErrUnexpectedEOF
108 } else {
109 // If an EOF happens after reading some but not
110 // all the bytes ReadFull returns ErrUnexpectedEOF
111 err = io.EOF
112 }
113 } else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
114 // Special cases when server writes more data
115 // than the content-length, net/http response
116 // body returns an error, instead of converting
117 // it to io.EOF - return unexpected EOF.
118 err = io.ErrUnexpectedEOF
119 }
120 // Send back the first response.
121 resCh <- getResponse{
122 objectInfo: objectInfo,
123 Size: size,
124 Error: err,
125 didRead: true,
126 }
127 } else {
128 // First request is a Stat or Seek call.
129 // Only need to run a StatObject until an actual Read or ReadAt request comes through.
130
131 // Remove range header if already set, for stat Operations to get original file size.
132 delete(opts.headers, "Range")
133 objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
134 if err != nil {
135 resCh <- getResponse{
136 Error: err,
137 }
138 // Exit the go-routine.
139 return
140 }
141 etag = objectInfo.ETag
142 // Send back the first response.
143 resCh <- getResponse{
144 objectInfo: objectInfo,
145 }
146 }
147 } else if req.settingObjectInfo { // Request is just to get objectInfo.
148 // Remove range header if already set, for stat Operations to get original file size.
149 delete(opts.headers, "Range")
150			// Check whether this is snowball;
151			// if so, do not use the If-Match
152			// feature, as it does not work there.
153 if etag != "" && !snowball {
154 opts.SetMatchETag(etag)
155 }
156 objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
157 if err != nil {
158 resCh <- getResponse{
159 Error: err,
160 }
161 // Exit the goroutine.
162 return
163 }
164 // Send back the objectInfo.
165 resCh <- getResponse{
166 objectInfo: objectInfo,
167 }
168 } else {
169			// When the offset changes, fetch the object again at the new offset.
170			// Because the httpReader may not have been set by the first
171			// request (if it was a Stat or Seek), check whether the object
172			// has been read yet so that a new reader is only initialized
173			// when one does not already exist.
174 // All readAt requests are new requests.
175 if req.DidOffsetChange || !req.beenRead {
176				// Check whether this is snowball;
177				// if so, do not use the If-Match
178				// feature, as it does not work there.
179 if etag != "" && !snowball {
180 opts.SetMatchETag(etag)
181 }
182 if httpReader != nil {
183 // Close previously opened http reader.
184 httpReader.Close()
185 }
186 // If this request is a readAt only get the specified range.
187 if req.isReadAt {
188 // Range is set with respect to the offset and length of the buffer requested.
189 opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
190 } else if req.Offset > 0 { // Range is set with respect to the offset.
191 opts.SetRange(req.Offset, 0)
192 } else {
193 // Remove range header if already set
194 delete(opts.headers, "Range")
195 }
196 httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
197 if err != nil {
198 resCh <- getResponse{
199 Error: err,
200 }
201 return
202 }
203 totalRead = 0
204 }
205
206			// Read at least len(req.Buffer) bytes; if we
207			// cannot, we have reached EOF.
208 size, err := readFull(httpReader, req.Buffer)
209 totalRead += size
210 if size > 0 && err == io.ErrUnexpectedEOF {
211 if int64(totalRead) < objectInfo.Size {
212 // In situations when returned size
213 // is less than the expected content
214 // length set by the server, make sure
215 // we return io.ErrUnexpectedEOF
216 err = io.ErrUnexpectedEOF
217 } else {
218 // If an EOF happens after reading some but not
219 // all the bytes ReadFull returns ErrUnexpectedEOF
220 err = io.EOF
221 }
222 } else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
223 // Special cases when server writes more data
224 // than the content-length, net/http response
225 // body returns an error, instead of converting
226 // it to io.EOF - return unexpected EOF.
227 err = io.ErrUnexpectedEOF
228 }
229
230 // Reply back how much was read.
231 resCh <- getResponse{
232 Size: size,
233 Error: err,
234 didRead: true,
235 objectInfo: objectInfo,
236 }
237 }
238 }
239 }()
240
241 // Create a newObject through the information sent back by reqCh.
242 return newObject(gctx, cancel, reqCh, resCh), nil
243}
244
245// get request message container to communicate with internal
246// go-routine.
247type getRequest struct {
248 Buffer []byte
249 Offset int64 // readAt offset.
250 DidOffsetChange bool // Tracks the offset changes for Seek requests.
251 beenRead bool // Determines if this is the first time an object is being read.
252 isReadAt bool // Determines if this request is a request to a specific range
253 isReadOp bool // Determines if this request is a Read or Read/At request.
254 isFirstReq bool // Determines if this request is the first time an object is being accessed.
255 settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
256}
257
258// get response message container to reply back for the request.
259type getResponse struct {
260 Size int
261 Error error
262 didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
263 objectInfo ObjectInfo // Used for the first request.
264}
265
266// Object represents an open object. It implements
267// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
268type Object struct {
269 // Mutex.
270 mutex *sync.Mutex
271
272 // User allocated and defined.
273 reqCh chan<- getRequest
274 resCh <-chan getResponse
275 ctx context.Context
276 cancel context.CancelFunc
277 currOffset int64
278 objectInfo ObjectInfo
279
280 // Ask lower level to initiate data fetching based on currOffset
281 seekData bool
282
283 // Keeps track of closed call.
284 isClosed bool
285
286 // Keeps track of if this is the first call.
287 isStarted bool
288
289 // Previous error saved for future calls.
290 prevErr error
291
292 // Keeps track of if this object has been read yet.
293 beenRead bool
294
295 // Keeps track of if objectInfo has been set yet.
296 objectInfoSet bool
297}
298
299// doGetRequest - sends a request on the object's reqCh and blocks for the
300// response. Returns the size of the buffer read, whether anything was read,
301// and any error encountered. For all first requests sent on the object
302// it is also responsible for sending back the objectInfo.
303func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
304 select {
305 case <-o.ctx.Done():
306 return getResponse{}, o.ctx.Err()
307 case o.reqCh <- request:
308 }
309
310 response := <-o.resCh
311
312 // Return any error to the top level.
313 if response.Error != nil {
314 return response, response.Error
315 }
316
317 // This was the first request.
318 if !o.isStarted {
319 // The object has been operated on.
320 o.isStarted = true
321 }
322 // Set the objectInfo if the request was not readAt
323 // and it hasn't been set before.
324 if !o.objectInfoSet && !request.isReadAt {
325 o.objectInfo = response.objectInfo
326 o.objectInfoSet = true
327 }
328 // Set beenRead only if it has not been set before.
329 if !o.beenRead {
330 o.beenRead = response.didRead
331 }
332	// Data is ready on the wire; no need to reinitiate the connection at a lower level.
333 o.seekData = false
334
335 return response, nil
336}
337
338// setOffset - handles the setting of offsets for
339// Read/ReadAt/Seek requests.
340func (o *Object) setOffset(bytesRead int64) error {
341 // Update the currentOffset.
342 o.currOffset += bytesRead
343
344 if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
345 return io.EOF
346 }
347 return nil
348}
349
350// Read reads up to len(b) bytes into b. It returns the number of
351// bytes read (0 <= n <= len(b)) and any error encountered. Returns
352// io.EOF upon end of file.
353func (o *Object) Read(b []byte) (n int, err error) {
354 if o == nil {
355 return 0, errInvalidArgument("Object is nil")
356 }
357
358 // Locking.
359 o.mutex.Lock()
360 defer o.mutex.Unlock()
361
362 // prevErr is previous error saved from previous operation.
363 if o.prevErr != nil || o.isClosed {
364 return 0, o.prevErr
365 }
366
367 // Create a new request.
368 readReq := getRequest{
369 isReadOp: true,
370 beenRead: o.beenRead,
371 Buffer: b,
372 }
373
374 // Alert that this is the first request.
375 if !o.isStarted {
376 readReq.isFirstReq = true
377 }
378
379 // Ask to establish a new data fetch routine based on seekData flag
380 readReq.DidOffsetChange = o.seekData
381 readReq.Offset = o.currOffset
382
383 // Send and receive from the first request.
384 response, err := o.doGetRequest(readReq)
385 if err != nil && err != io.EOF {
386 // Save the error for future calls.
387 o.prevErr = err
388 return response.Size, err
389 }
390
391 // Bytes read.
392 bytesRead := int64(response.Size)
393
394 // Set the new offset.
395 oerr := o.setOffset(bytesRead)
396 if oerr != nil {
397 // Save the error for future calls.
398 o.prevErr = oerr
399 return response.Size, oerr
400 }
401
402 // Return the response.
403 return response.Size, err
404}
405
406// Stat returns the ObjectInfo structure describing Object.
407func (o *Object) Stat() (ObjectInfo, error) {
408 if o == nil {
409 return ObjectInfo{}, errInvalidArgument("Object is nil")
410 }
411 // Locking.
412 o.mutex.Lock()
413 defer o.mutex.Unlock()
414
415 if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
416 return ObjectInfo{}, o.prevErr
417 }
418
419 // This is the first request.
420 if !o.isStarted || !o.objectInfoSet {
421 // Send the request and get the response.
422 _, err := o.doGetRequest(getRequest{
423 isFirstReq: !o.isStarted,
424 settingObjectInfo: !o.objectInfoSet,
425 })
426 if err != nil {
427 o.prevErr = err
428 return ObjectInfo{}, err
429 }
430 }
431
432 return o.objectInfo, nil
433}
434
435// ReadAt reads len(b) bytes from the File starting at byte offset
436// off. It returns the number of bytes read and the error, if any.
437// ReadAt always returns a non-nil error when n < len(b). At end of
438// file, that error is io.EOF.
439func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
440 if o == nil {
441 return 0, errInvalidArgument("Object is nil")
442 }
443
444 // Locking.
445 o.mutex.Lock()
446 defer o.mutex.Unlock()
447
448 // prevErr is error which was saved in previous operation.
449 if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
450 return 0, o.prevErr
451 }
452
453 // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
454 o.currOffset = offset
455
456 // Can only compare offsets to size when size has been set.
457 if o.objectInfoSet {
458		// If the offset is negative, then we return io.EOF.
459		// If the offset is greater than or equal to the object size, we return io.EOF.
460 if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
461 return 0, io.EOF
462 }
463 }
464
465 // Create the new readAt request.
466 readAtReq := getRequest{
467 isReadOp: true,
468 isReadAt: true,
469 DidOffsetChange: true, // Offset always changes.
470 beenRead: o.beenRead, // Set if this is the first request to try and read.
471 Offset: offset, // Set the offset.
472 Buffer: b,
473 }
474
475 // Alert that this is the first request.
476 if !o.isStarted {
477 readAtReq.isFirstReq = true
478 }
479
480 // Send and receive from the first request.
481 response, err := o.doGetRequest(readAtReq)
482 if err != nil && err != io.EOF {
483 // Save the error.
484 o.prevErr = err
485 return response.Size, err
486 }
487 // Bytes read.
488 bytesRead := int64(response.Size)
489 // There is no valid objectInfo yet
490 // to compare against for EOF.
491 if !o.objectInfoSet {
492 // Update the currentOffset.
493 o.currOffset += bytesRead
494 } else {
495 // If this was not the first request update
496 // the offsets and compare against objectInfo
497 // for EOF.
498 oerr := o.setOffset(bytesRead)
499 if oerr != nil {
500 o.prevErr = oerr
501 return response.Size, oerr
502 }
503 }
504 return response.Size, err
505}
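Because *minio.Object implements io.ReaderAt, it composes with io.NewSectionReader. A brief sketch under the same assumptions as above (obj from client.GetObject); note that, per the comment in ReadAt above, this implementation shifts the current offset as a side effect:

	// Read 256 bytes starting at byte offset 512 of the object.
	section := io.NewSectionReader(obj, 512, 256)
	buf := make([]byte, 256)
	if _, err := io.ReadFull(section, buf); err != nil {
		log.Fatalln(err)
	}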
506
507// Seek sets the offset for the next Read or Write to offset,
508// interpreted according to whence: 0 means relative to the
509// origin of the file, 1 means relative to the current offset,
510// and 2 means relative to the end.
511// Seek returns the new offset and an error, if any.
512//
513// Seeking to a negative offset is an error. Seeking to any positive
514 // offset is legal; subsequent io operations succeed as long as the
515 // underlying object is not closed.
516func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
517 if o == nil {
518 return 0, errInvalidArgument("Object is nil")
519 }
520
521 // Locking.
522 o.mutex.Lock()
523 defer o.mutex.Unlock()
524
525 // Seeking at EOF is legal: allow only io.EOF, for any other saved error we return.
526 if o.prevErr != nil && o.prevErr != io.EOF {
527 return 0, o.prevErr
528 }
529
530 // Negative offset is valid for whence of '2'.
531 if offset < 0 && whence != 2 {
532 return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
533 }
534
535 // This is the first request. So before anything else
536 // get the ObjectInfo.
537 if !o.isStarted || !o.objectInfoSet {
538 // Create the new Seek request.
539 seekReq := getRequest{
540 isReadOp: false,
541 Offset: offset,
542 isFirstReq: true,
543 }
544 // Send and receive from the seek request.
545 _, err := o.doGetRequest(seekReq)
546 if err != nil {
547 // Save the error.
548 o.prevErr = err
549 return 0, err
550 }
551 }
552
553 newOffset := o.currOffset
554
555 // Switch through whence.
556 switch whence {
557 default:
558 return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
559 case 0:
560 if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
561 return 0, io.EOF
562 }
563 newOffset = offset
564 case 1:
565 if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
566 return 0, io.EOF
567 }
568 newOffset += offset
569 case 2:
570 // If we don't know the object size return an error for io.SeekEnd
571 if o.objectInfo.Size < 0 {
572 return 0, errInvalidArgument("Whence END is not supported when the object size is unknown")
573 }
574 // Seeking to positive offset is valid for whence '2', but
575 // since we are backing a Reader we have reached 'EOF' if
576 // offset is positive.
577 if offset > 0 {
578 return 0, io.EOF
579 }
580 // Seeking to a negative position is not allowed for this whence.
581 if o.objectInfo.Size+offset < 0 {
582 return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
583 }
584 newOffset = o.objectInfo.Size + offset
585 }
586 // Reset the saved error since we successfully seeked, let the Read
587 // and ReadAt decide.
588 if o.prevErr == io.EOF {
589 o.prevErr = nil
590 }
591
592 // Ask lower level to fetch again from source when necessary
593 o.seekData = (newOffset != o.currOffset) || o.seekData
594 o.currOffset = newOffset
595
596 // Return the effective offset.
597 return o.currOffset, nil
598}
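A sketch of reading an object's trailing bytes with Seek; as the code above shows, a negative offset with whence 2 (io.SeekEnd) is the supported way to address the tail of an object of known size:

	// Position 16 bytes before the end, then read them.
	if _, err := obj.Seek(-16, io.SeekEnd); err != nil {
		log.Fatalln(err)
	}
	tail := make([]byte, 16)
	n, err := io.ReadFull(obj, tail)
	if err != nil && err != io.ErrUnexpectedEOF {
		log.Fatalln(err)
	}
	fmt.Printf("last %d bytes: %x\n", n, tail[:n])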
599
600 // Close - Closes the object. After the first successful call,
601 // subsequent Close() calls return an error.
602func (o *Object) Close() (err error) {
603 if o == nil {
604 return errInvalidArgument("Object is nil")
605 }
606
607 // Locking.
608 o.mutex.Lock()
609 defer o.mutex.Unlock()
610
611 // If already closed, return the saved error.
612 if o.isClosed {
613 return o.prevErr
614 }
615
616 // Close successfully.
617 o.cancel()
618
619 // Close the request channel to signal the internal go-routine to exit.
620 close(o.reqCh)
621
622 // Save for future operations.
623 errMsg := "Object is already closed. Bad file descriptor."
624 o.prevErr = errors.New(errMsg)
625 // Record that the object was closed successfully.
626 o.isClosed = true
627 return nil
628}
629
630 // newObject instantiates a new *minio.Object.
631 // Its ObjectInfo will be set by setObjectInfo.
632func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object {
633 return &Object{
634 ctx: ctx,
635 cancel: cancel,
636 mutex: &sync.Mutex{},
637 reqCh: reqCh,
638 resCh: resCh,
639 }
640}
641
642// getObject - retrieve object from Object Storage.
643//
644 // Additionally, this function takes range arguments to download the specified
645 // range bytes of an object. Setting both offset and length to 0 will download the full object.
646 //
647 // For more information about the HTTP Range header, see
648 // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
649func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
650 // Validate input arguments.
651 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
652 return nil, ObjectInfo{}, nil, err
653 }
654 if err := s3utils.CheckValidObjectName(objectName); err != nil {
655 return nil, ObjectInfo{}, nil, err
656 }
657
658 // Execute GET on objectName.
659 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
660 bucketName: bucketName,
661 objectName: objectName,
662 queryValues: opts.toQueryValues(),
663 customHeader: opts.Header(),
664 contentSHA256Hex: emptySHA256Hex,
665 })
666 if err != nil {
667 return nil, ObjectInfo{}, nil, err
668 }
669 if resp != nil {
670 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
671 return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
672 }
673 }
674
675 objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header)
676 if err != nil {
677 closeResponse(resp)
678 return nil, ObjectInfo{}, nil, err
679 }
680
681 // Do not close the body here; the caller will close it.
682 return resp.Body, objectStat, resp.Header, nil
683}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
new file mode 100644
index 0000000..a0216e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -0,0 +1,203 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "fmt"
22 "net/http"
23 "net/url"
24 "strconv"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/encrypt"
28)
29
30// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
31type AdvancedGetOptions struct {
32 ReplicationDeleteMarker bool
33 IsReplicationReadyForDeleteMarker bool
34 ReplicationProxyRequest string
35}
36
37// GetObjectOptions are used to specify additional headers or options
38// during GET requests.
39type GetObjectOptions struct {
40 headers map[string]string
41 reqParams url.Values
42 ServerSideEncryption encrypt.ServerSide
43 VersionID string
44 PartNumber int
45
46 // Include any checksums, if object was uploaded with checksum.
47 // For multipart objects this is a checksum of part checksums.
48 // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
49 Checksum bool
50
51 // Not to be used by external applications.
52 Internal AdvancedGetOptions
53}
54
55// StatObjectOptions are used to specify additional headers or options
56// during GET info/stat requests.
57type StatObjectOptions = GetObjectOptions
58
59// Header returns the http.Header representation of the GET options.
60func (o GetObjectOptions) Header() http.Header {
61 headers := make(http.Header, len(o.headers))
62 for k, v := range o.headers {
63 headers.Set(k, v)
64 }
65 if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
66 o.ServerSideEncryption.Marshal(headers)
67 }
68 // This header is set for the active-active replication scenario where a GET/HEAD
69 // to site A is proxied to site B if the object/version is missing on site A.
70 if o.Internal.ReplicationProxyRequest != "" {
71 headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest)
72 }
73 if o.Checksum {
74 headers.Set("x-amz-checksum-mode", "ENABLED")
75 }
76 return headers
77}
78
79// Set adds a key value pair to the options. The
80// key-value pair will be part of the HTTP GET request
81// headers.
82func (o *GetObjectOptions) Set(key, value string) {
83 if o.headers == nil {
84 o.headers = make(map[string]string)
85 }
86 o.headers[http.CanonicalHeaderKey(key)] = value
87}
88
89 // SetReqParam - set a request query string parameter.
90 // Supported keys: see supportedQueryValues and allowedCustomQueryPrefix.
91 // If an unsupported key is passed in, it will be ignored and nothing will be done.
92func (o *GetObjectOptions) SetReqParam(key, value string) {
93 if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
94 // do nothing
95 return
96 }
97 if o.reqParams == nil {
98 o.reqParams = make(url.Values)
99 }
100 o.reqParams.Set(key, value)
101}
102
103 // AddReqParam - add a request query string parameter.
104 // Supported keys: see supportedQueryValues and allowedCustomQueryPrefix.
105 // If an unsupported key is passed in, it will be ignored and nothing will be done.
106func (o *GetObjectOptions) AddReqParam(key, value string) {
107 if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
108 // do nothing
109 return
110 }
111 if o.reqParams == nil {
112 o.reqParams = make(url.Values)
113 }
114 o.reqParams.Add(key, value)
115}
116
117// SetMatchETag - set match etag.
118func (o *GetObjectOptions) SetMatchETag(etag string) error {
119 if etag == "" {
120 return errInvalidArgument("ETag cannot be empty.")
121 }
122 o.Set("If-Match", "\""+etag+"\"")
123 return nil
124}
125
126// SetMatchETagExcept - set match etag except.
127func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
128 if etag == "" {
129 return errInvalidArgument("ETag cannot be empty.")
130 }
131 o.Set("If-None-Match", "\""+etag+"\"")
132 return nil
133}
134
135// SetUnmodified - set unmodified time since.
136func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
137 if modTime.IsZero() {
138 return errInvalidArgument("Modified since cannot be empty.")
139 }
140 o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
141 return nil
142}
143
144// SetModified - set modified time since.
145func (o *GetObjectOptions) SetModified(modTime time.Time) error {
146 if modTime.IsZero() {
147 return errInvalidArgument("Modified since cannot be empty.")
148 }
149 o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
150 return nil
151}
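A sketch of a conditional download built from the setters above; etag and lastSeen are hypothetical values taken from an earlier Stat call, and client/ctx are assumed as before. The conditional request is only issued lazily, on the first Read or Stat of the returned object:

	opts := minio.GetObjectOptions{}
	if err := opts.SetMatchETag(etag); err != nil { // rejects an empty ETag
		log.Fatalln(err)
	}
	if err := opts.SetModified(lastSeen); err != nil { // sends If-Modified-Since
		log.Fatalln(err)
	}
	obj, err := client.GetObject(ctx, "mybucket", "myobject", opts)
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()
	if _, err := obj.Stat(); err != nil { // surfaces a failed precondition
		log.Fatalln(err)
	}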
152
153// SetRange - set the start and end offset of the object to be read.
154// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
155func (o *GetObjectOptions) SetRange(start, end int64) error {
156 switch {
157 case start == 0 && end < 0:
158 // Read last '-end' bytes. `bytes=-N`.
159 o.Set("Range", fmt.Sprintf("bytes=%d", end))
160 case 0 < start && end == 0:
161 // Read everything starting from offset
162 // 'start'. `bytes=N-`.
163 o.Set("Range", fmt.Sprintf("bytes=%d-", start))
164 case 0 <= start && start <= end:
165 // Read everything starting at 'start' till the
166 // 'end'. `bytes=N-M`
167 o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
168 default:
169 // All other cases such as
170 // bytes=-3-
171 // bytes=5-3
172 // bytes=-2-4
173 // bytes=-3-0
174 // bytes=-3--2
175 // are invalid.
176 return errInvalidArgument(
177 fmt.Sprintf(
178 "Invalid range specified: start=%d end=%d",
179 start, end))
180 }
181 return nil
182}
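The three accepted shapes map to HTTP Range header values as follows (a sketch; the calls are alternatives, since each overwrites any previously set Range header):

	opts := minio.GetObjectOptions{}
	_ = opts.SetRange(0, -128)  // "bytes=-128": the last 128 bytes
	_ = opts.SetRange(1024, 0)  // "bytes=1024-": everything from offset 1024
	_ = opts.SetRange(0, 4095)  // "bytes=0-4095": the first 4096 bytes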
183
184// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters.
185func (o *GetObjectOptions) toQueryValues() url.Values {
186 urlValues := make(url.Values)
187 if o.VersionID != "" {
188 urlValues.Set("versionId", o.VersionID)
189 }
190 if o.PartNumber > 0 {
191 urlValues.Set("partNumber", strconv.Itoa(o.PartNumber))
192 }
193
194 if o.reqParams != nil {
195 for key, values := range o.reqParams {
196 for _, value := range values {
197 urlValues.Add(key, value)
198 }
199 }
200 }
201
202 return urlValues
203}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
new file mode 100644
index 0000000..31b6edf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -0,0 +1,1057 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "fmt"
23 "net/http"
24 "net/url"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28)
29
30 // ListBuckets lists all buckets owned by this authenticated user.
31//
32// This call requires explicit authentication, no anonymous requests are
33// allowed for listing buckets.
34//
35 // api := client.New(....)
36 // buckets, err := api.ListBuckets(context.Background())
37 // // handle err, then:
38 // for _, bucket := range buckets { fmt.Println(bucket) }
39func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
40 // Execute GET on service.
41 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})
42 defer closeResponse(resp)
43 if err != nil {
44 return nil, err
45 }
46 if resp != nil {
47 if resp.StatusCode != http.StatusOK {
48 return nil, httpRespToErrorResponse(resp, "", "")
49 }
50 }
51 listAllMyBucketsResult := listAllMyBucketsResult{}
52 err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
53 if err != nil {
54 return nil, err
55 }
56 return listAllMyBucketsResult.Buckets.Bucket, nil
57}
58
59// Bucket List Operations.
60func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
61 // Allocate new list objects channel.
62 objectStatCh := make(chan ObjectInfo, 1)
63 // Default listing is delimited at "/"
64 delimiter := "/"
65 if opts.Recursive {
66 // If recursive we do not delimit.
67 delimiter = ""
68 }
69
70 // Return object owner information by default
71 fetchOwner := true
72
73 sendObjectInfo := func(info ObjectInfo) {
74 select {
75 case objectStatCh <- info:
76 case <-ctx.Done():
77 }
78 }
79
80 // Validate bucket name.
81 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
82 defer close(objectStatCh)
83 sendObjectInfo(ObjectInfo{
84 Err: err,
85 })
86 return objectStatCh
87 }
88
89 // Validate incoming object prefix.
90 if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
91 defer close(objectStatCh)
92 sendObjectInfo(ObjectInfo{
93 Err: err,
94 })
95 return objectStatCh
96 }
97
98 // Initiate list objects goroutine here.
99 go func(objectStatCh chan<- ObjectInfo) {
100 defer func() {
101 if contextCanceled(ctx) {
102 objectStatCh <- ObjectInfo{
103 Err: ctx.Err(),
104 }
105 }
106 close(objectStatCh)
107 }()
108
109 // Save continuationToken for next request.
110 var continuationToken string
111 for {
112 // Get a list of objects, a maximum of 1000 per request.
113 result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
114 fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
115 if err != nil {
116 sendObjectInfo(ObjectInfo{
117 Err: err,
118 })
119 return
120 }
121
122 // If contents are available, loop through and send over the channel.
123 for _, object := range result.Contents {
124 object.ETag = trimEtag(object.ETag)
125 select {
126 // Send object content.
127 case objectStatCh <- object:
128 // If we receive done from the caller, return here.
129 case <-ctx.Done():
130 return
131 }
132 }
133
134 // Send all common prefixes if any.
135 // NOTE: prefixes are only present if the request is delimited.
136 for _, obj := range result.CommonPrefixes {
137 select {
138 // Send object prefixes.
139 case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
140 // If we receive done from the caller, return here.
141 case <-ctx.Done():
142 return
143 }
144 }
145
146 // If continuation token present, save it for next request.
147 if result.NextContinuationToken != "" {
148 continuationToken = result.NextContinuationToken
149 }
150
151 // Listing ends when the result is not truncated, return right here.
152 if !result.IsTruncated {
153 return
154 }
155
156 // Add this to catch broken S3 API implementations.
157 if continuationToken == "" {
158 sendObjectInfo(ObjectInfo{
159 Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL),
160 })
161 return
162 }
163 }
164 }(objectStatCh)
165 return objectStatCh
166}
167
168// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
169//
170// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
171// request parameters :-
172// ---------
173// ?prefix - Limits the response to keys that begin with the specified prefix.
174// ?continuation-token - Used to continue iterating over a set of objects
175// ?metadata - Specifies if we want metadata for the objects as part of list operation.
176// ?delimiter - A delimiter is a character you use to group keys.
177// ?start-after - Sets a marker to start listing lexically at this key onwards.
178// ?max-keys - Sets the maximum number of keys returned in the response body.
179func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
180 // Validate bucket name.
181 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
182 return ListBucketV2Result{}, err
183 }
184 // Validate object prefix.
185 if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
186 return ListBucketV2Result{}, err
187 }
188 // Get resources properly escaped and lined up before
189 // using them in http request.
190 urlValues := make(url.Values)
191
192 // Always set list-type in ListObjects V2
193 urlValues.Set("list-type", "2")
194
195 if metadata {
196 urlValues.Set("metadata", "true")
197 }
198
199 // Set this conditionally if asked
200 if startAfter != "" {
201 urlValues.Set("start-after", startAfter)
202 }
203
204 // Always set encoding-type in ListObjects V2
205 urlValues.Set("encoding-type", "url")
206
207 // Set object prefix; an empty prefix value is okay.
208 urlValues.Set("prefix", objectPrefix)
209
210 // Set delimiter; an empty delimiter value is okay.
211 urlValues.Set("delimiter", delimiter)
212
213 // Set continuation token
214 if continuationToken != "" {
215 urlValues.Set("continuation-token", continuationToken)
216 }
217
218 // Fetch owner when listing
219 if fetchOwner {
220 urlValues.Set("fetch-owner", "true")
221 }
222
223 // Set max keys.
224 if maxkeys > 0 {
225 urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
226 }
227
228 // Execute GET on bucket to list objects.
229 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
230 bucketName: bucketName,
231 queryValues: urlValues,
232 contentSHA256Hex: emptySHA256Hex,
233 customHeader: headers,
234 })
235 defer closeResponse(resp)
236 if err != nil {
237 return ListBucketV2Result{}, err
238 }
239 if resp != nil {
240 if resp.StatusCode != http.StatusOK {
241 return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
242 }
243 }
244
245 // Decode ListBucketV2Result XML.
246 listBucketResult := ListBucketV2Result{}
247 if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
248 return listBucketResult, err
249 }
250
251 // This is an additional verification check to make
252 // sure proper responses are received.
253 if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
254 return listBucketResult, ErrorResponse{
255 Code: "NotImplemented",
256 Message: "Truncated response should have continuation token set",
257 }
258 }
259
260 for i, obj := range listBucketResult.Contents {
261 listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
262 if err != nil {
263 return listBucketResult, err
264 }
265 listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
266 }
267
268 for i, obj := range listBucketResult.CommonPrefixes {
269 listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
270 if err != nil {
271 return listBucketResult, err
272 }
273 }
274
275 // Success.
276 return listBucketResult, nil
277}
278
279func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
280 // Allocate new list objects channel.
281 objectStatCh := make(chan ObjectInfo, 1)
282 // Default listing is delimited at "/"
283 delimiter := "/"
284 if opts.Recursive {
285 // If recursive we do not delimit.
286 delimiter = ""
287 }
288
289 sendObjectInfo := func(info ObjectInfo) {
290 select {
291 case objectStatCh <- info:
292 case <-ctx.Done():
293 }
294 }
295
296 // Validate bucket name.
297 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
298 defer close(objectStatCh)
299 sendObjectInfo(ObjectInfo{
300 Err: err,
301 })
302 return objectStatCh
303 }
304 // Validate incoming object prefix.
305 if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
306 defer close(objectStatCh)
307 sendObjectInfo(ObjectInfo{
308 Err: err,
309 })
310 return objectStatCh
311 }
312
313 // Initiate list objects goroutine here.
314 go func(objectStatCh chan<- ObjectInfo) {
315 defer func() {
316 if contextCanceled(ctx) {
317 objectStatCh <- ObjectInfo{
318 Err: ctx.Err(),
319 }
320 }
321 close(objectStatCh)
322 }()
323
324 marker := opts.StartAfter
325 for {
326 // Get a list of objects, a maximum of 1000 per request.
327 result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
328 if err != nil {
329 sendObjectInfo(ObjectInfo{
330 Err: err,
331 })
332 return
333 }
334
335 // If contents are available, loop through and send over the channel.
336 for _, object := range result.Contents {
337 // Save the marker.
338 marker = object.Key
339 object.ETag = trimEtag(object.ETag)
340 select {
341 // Send object content.
342 case objectStatCh <- object:
344 // If we receive done from the caller, return here.
344 case <-ctx.Done():
345 return
346 }
347 }
348
349 // Send all common prefixes if any.
350 // NOTE: prefixes are only present if the request is delimited.
351 for _, obj := range result.CommonPrefixes {
352 select {
353 // Send object prefixes.
354 case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
356 // If we receive done from the caller, return here.
356 case <-ctx.Done():
357 return
358 }
359 }
360
361 // If next marker present, save it for next request.
362 if result.NextMarker != "" {
363 marker = result.NextMarker
364 }
365
366 // Listing ends when the result is not truncated, return right here.
367 if !result.IsTruncated {
368 return
369 }
370 }
371 }(objectStatCh)
372 return objectStatCh
373}
374
375func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
376 // Allocate new list objects channel.
377 resultCh := make(chan ObjectInfo, 1)
378 // Default listing is delimited at "/"
379 delimiter := "/"
380 if opts.Recursive {
381 // If recursive we do not delimit.
382 delimiter = ""
383 }
384
385 sendObjectInfo := func(info ObjectInfo) {
386 select {
387 case resultCh <- info:
388 case <-ctx.Done():
389 }
390 }
391
392 // Validate bucket name.
393 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
394 defer close(resultCh)
395 sendObjectInfo(ObjectInfo{
396 Err: err,
397 })
398 return resultCh
399 }
400
401 // Validate incoming object prefix.
402 if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
403 defer close(resultCh)
404 sendObjectInfo(ObjectInfo{
405 Err: err,
406 })
407 return resultCh
408 }
409
410 // Initiate list objects goroutine here.
411 go func(resultCh chan<- ObjectInfo) {
412 defer func() {
413 if contextCanceled(ctx) {
414 resultCh <- ObjectInfo{
415 Err: ctx.Err(),
416 }
417 }
418 close(resultCh)
419 }()
420
421 var (
422 keyMarker = ""
423 versionIDMarker = ""
424 )
425
426 for {
427 // Get a list of objects, a maximum of 1000 per request.
428 result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter)
429 if err != nil {
430 sendObjectInfo(ObjectInfo{
431 Err: err,
432 })
433 return
434 }
435
436 // If contents are available, loop through and send over the channel.
437 for _, version := range result.Versions {
438 info := ObjectInfo{
439 ETag: trimEtag(version.ETag),
440 Key: version.Key,
441 LastModified: version.LastModified.Truncate(time.Millisecond),
442 Size: version.Size,
443 Owner: version.Owner,
444 StorageClass: version.StorageClass,
445 IsLatest: version.IsLatest,
446 VersionID: version.VersionID,
447 IsDeleteMarker: version.isDeleteMarker,
448 UserTags: version.UserTags,
449 UserMetadata: version.UserMetadata,
450 Internal: version.Internal,
451 }
452 select {
453 // Send object version info.
454 case resultCh <- info:
456 // If we receive done from the caller, return here.
456 case <-ctx.Done():
457 return
458 }
459 }
460
461 // Send all common prefixes if any.
462 // NOTE: prefixes are only present if the request is delimited.
463 for _, obj := range result.CommonPrefixes {
464 select {
465 // Send object prefixes.
466 case resultCh <- ObjectInfo{Key: obj.Prefix}:
468 // If we receive done from the caller, return here.
468 case <-ctx.Done():
469 return
470 }
471 }
472
473 // If next key marker is present, save it for next request.
474 if result.NextKeyMarker != "" {
475 keyMarker = result.NextKeyMarker
476 }
477
478 // If next version id marker is present, save it for next request.
479 if result.NextVersionIDMarker != "" {
480 versionIDMarker = result.NextVersionIDMarker
481 }
482
483 // Listing ends when the result is not truncated, return right here.
484 if !result.IsTruncated {
485 return
486 }
487 }
488 }(resultCh)
489 return resultCh
490}
491
492 // listObjectVersionsQuery - (List Object Versions) - List some or all (up to 1000) of the existing objects
493// and their versions in a bucket.
494//
495// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
496// request parameters :-
497// ---------
498// ?key-marker - Specifies the key to start with when listing objects in a bucket.
499// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket.
500// ?delimiter - A delimiter is a character you use to group keys.
501// ?prefix - Limits the response to keys that begin with the specified prefix.
502// ?max-keys - Sets the maximum number of keys returned in the response body.
503func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) {
504 // Validate bucket name.
505 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
506 return ListVersionsResult{}, err
507 }
508 // Validate object prefix.
509 if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
510 return ListVersionsResult{}, err
511 }
512 // Get resources properly escaped and lined up before
513 // using them in http request.
514 urlValues := make(url.Values)
515
516 // Set versions to trigger versioning API
517 urlValues.Set("versions", "")
518
519 // Set object prefix; an empty prefix value is okay.
520 urlValues.Set("prefix", opts.Prefix)
521
522 // Set delimiter; an empty delimiter value is okay.
523 urlValues.Set("delimiter", delimiter)
524
525 // Set object marker.
526 if keyMarker != "" {
527 urlValues.Set("key-marker", keyMarker)
528 }
529
530 // Set max keys.
531 if opts.MaxKeys > 0 {
532 urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys))
533 }
534
535 // Set version ID marker
536 if versionIDMarker != "" {
537 urlValues.Set("version-id-marker", versionIDMarker)
538 }
539
540 if opts.WithMetadata {
541 urlValues.Set("metadata", "true")
542 }
543
544 // Always set encoding-type
545 urlValues.Set("encoding-type", "url")
546
547 // Execute GET on bucket to list objects.
548 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
549 bucketName: bucketName,
550 queryValues: urlValues,
551 contentSHA256Hex: emptySHA256Hex,
552 customHeader: opts.headers,
553 })
554 defer closeResponse(resp)
555 if err != nil {
556 return ListVersionsResult{}, err
557 }
558 if resp != nil {
559 if resp.StatusCode != http.StatusOK {
560 return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "")
561 }
562 }
563
564 // Decode ListVersionsResult XML.
565 listObjectVersionsOutput := ListVersionsResult{}
566 err = xmlDecoder(resp.Body, &listObjectVersionsOutput)
567 if err != nil {
568 return ListVersionsResult{}, err
569 }
570
571 for i, obj := range listObjectVersionsOutput.Versions {
572 listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType)
573 if err != nil {
574 return listObjectVersionsOutput, err
575 }
576 }
577
578 for i, obj := range listObjectVersionsOutput.CommonPrefixes {
579 listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType)
580 if err != nil {
581 return listObjectVersionsOutput, err
582 }
583 }
584
585 if listObjectVersionsOutput.NextKeyMarker != "" {
586 listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType)
587 if err != nil {
588 return listObjectVersionsOutput, err
589 }
590 }
591
592 return listObjectVersionsOutput, nil
593}
594
595 // listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
596//
597// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
598// request parameters :-
599// ---------
600// ?marker - Specifies the key to start with when listing objects in a bucket.
601// ?delimiter - A delimiter is a character you use to group keys.
602// ?prefix - Limits the response to keys that begin with the specified prefix.
603// ?max-keys - Sets the maximum number of keys returned in the response body.
604func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
605 // Validate bucket name.
606 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
607 return ListBucketResult{}, err
608 }
609 // Validate object prefix.
610 if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
611 return ListBucketResult{}, err
612 }
613 // Get resources properly escaped and lined up before
614 // using them in http request.
615 urlValues := make(url.Values)
616
617 // Set object prefix; an empty prefix value is okay.
618 urlValues.Set("prefix", objectPrefix)
619
620 // Set delimiter; an empty delimiter value is okay.
621 urlValues.Set("delimiter", delimiter)
622
623 // Set object marker.
624 if objectMarker != "" {
625 urlValues.Set("marker", objectMarker)
626 }
627
628 // Set max keys.
629 if maxkeys > 0 {
630 urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
631 }
632
633 // Always set encoding-type
634 urlValues.Set("encoding-type", "url")
635
636 // Execute GET on bucket to list objects.
637 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
638 bucketName: bucketName,
639 queryValues: urlValues,
640 contentSHA256Hex: emptySHA256Hex,
641 customHeader: headers,
642 })
643 defer closeResponse(resp)
644 if err != nil {
645 return ListBucketResult{}, err
646 }
647 if resp != nil {
648 if resp.StatusCode != http.StatusOK {
649 return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
650 }
651 }
652 // Decode ListBucketResult XML.
653 listBucketResult := ListBucketResult{}
654 err = xmlDecoder(resp.Body, &listBucketResult)
655 if err != nil {
656 return listBucketResult, err
657 }
658
659 for i, obj := range listBucketResult.Contents {
660 listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
661 if err != nil {
662 return listBucketResult, err
663 }
664 listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
665 }
666
667 for i, obj := range listBucketResult.CommonPrefixes {
668 listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
669 if err != nil {
670 return listBucketResult, err
671 }
672 }
673
674 if listBucketResult.NextMarker != "" {
675 listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
676 if err != nil {
677 return listBucketResult, err
678 }
679 }
680
681 return listBucketResult, nil
682}
683
684// ListObjectsOptions holds all options of a list object request
685type ListObjectsOptions struct {
686 // Include objects versions in the listing
687 WithVersions bool
688 // Include objects metadata in the listing
689 WithMetadata bool
690 // Only list objects with the prefix
691 Prefix string
692 // Ignore '/' delimiter
693 Recursive bool
694 // The maximum number of objects requested per
695 // batch, advanced use-case not useful for most
696 // applications
697 MaxKeys int
698 // StartAfter starts listing lexically at this
699 // object onwards; this value is also used
700 // as the Marker when `UseV1` is set to true.
701 StartAfter string
702
703 // Use the deprecated list objects V1 API
704 UseV1 bool
705
706 headers http.Header
707}
708
709// Set adds a key value pair to the options. The
710// key-value pair will be part of the HTTP GET request
711// headers.
712func (o *ListObjectsOptions) Set(key, value string) {
713 if o.headers == nil {
714 o.headers = make(http.Header)
715 }
716 o.headers.Set(key, value)
717}
718
719// ListObjects returns objects list after evaluating the passed options.
720//
721// api := client.New(....)
722// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
723// fmt.Println(object)
724// }
725//
726 // If the caller cancels the context, then the last entry on the 'chan ObjectInfo' will be the context.Error().
727 // The caller must drain the channel entirely and wait until the channel is closed before proceeding;
728 // without waiting for the channel to be closed completely you might leak goroutines.
729func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
730 if opts.WithVersions {
731 return c.listObjectVersions(ctx, bucketName, opts)
732 }
733
734 // Use legacy list objects v1 API
735 if opts.UseV1 {
736 return c.listObjects(ctx, bucketName, opts)
737 }
738
739 // Check whether this is a snowball region; if so, ListObjectsV2 doesn't work, so fall back to listObjects V1.
740 if location, ok := c.bucketLocCache.Get(bucketName); ok {
741 if location == "snowball" {
742 return c.listObjects(ctx, bucketName, opts)
743 }
744 }
745
746 return c.listObjectsV2(ctx, bucketName, opts)
747}
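A sketch of the draining pattern the documentation above calls for; errors arrive in-band in ObjectInfo.Err, and the loop runs until the channel is closed (client and ctx assumed as before):

	for object := range client.ListObjects(ctx, "mybucket", minio.ListObjectsOptions{
		Prefix:    "logs/",
		Recursive: true,
	}) {
		if object.Err != nil {
			log.Println("listing error:", object.Err)
			continue // keep draining until the channel is closed
		}
		fmt.Println(object.Key, object.Size)
	}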
748
749// ListIncompleteUploads - List incompletely uploaded multipart objects.
750//
751 // ListIncompleteUploads lists all incomplete objects matching the
752 // objectPrefix from the specified bucket. If recursion is enabled
753 // it lists all subdirectories and all their contents.
754 //
755 // Your input parameters are just bucketName, objectPrefix, recursive.
756 // If you set recursive to 'true' this function will return all
757 // the incomplete multipart objects in the given bucket.
758//
759// api := client.New(....)
760 // // Recursively list all incomplete uploads in 'mytestbucket'
761// recursive := true
762// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
763// fmt.Println(message)
764// }
765func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
766 return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
767}
768
769// contextCanceled returns whether a context is canceled.
770func contextCanceled(ctx context.Context) bool {
771 select {
772 case <-ctx.Done():
773 return true
774 default:
775 return false
776 }
777}
778
779// listIncompleteUploads lists all incomplete uploads.
780func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
781 // Allocate channel for multipart uploads.
782 objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
783 // Delimiter is set to "/" by default.
784 delimiter := "/"
785 if recursive {
786 // If recursive do not delimit.
787 delimiter = ""
788 }
789 // Validate bucket name.
790 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
791 defer close(objectMultipartStatCh)
792 objectMultipartStatCh <- ObjectMultipartInfo{
793 Err: err,
794 }
795 return objectMultipartStatCh
796 }
797 // Validate incoming object prefix.
798 if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
799 defer close(objectMultipartStatCh)
800 objectMultipartStatCh <- ObjectMultipartInfo{
801 Err: err,
802 }
803 return objectMultipartStatCh
804 }
805 go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
806 defer func() {
807 if contextCanceled(ctx) {
808 objectMultipartStatCh <- ObjectMultipartInfo{
809 Err: ctx.Err(),
810 }
811 }
812 close(objectMultipartStatCh)
813 }()
814
815 // Object and upload ID markers for future requests.
816 var objectMarker string
817 var uploadIDMarker string
818 for {
819 // list all multipart uploads.
820 result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
821 if err != nil {
822 objectMultipartStatCh <- ObjectMultipartInfo{
823 Err: err,
824 }
825 return
826 }
827 objectMarker = result.NextKeyMarker
828 uploadIDMarker = result.NextUploadIDMarker
829
830 // Send all multipart uploads.
831 for _, obj := range result.Uploads {
832 // Send each multipart upload over the channel.
833 select {
834 // Send individual uploads here.
835 case objectMultipartStatCh <- obj:
836 // If the context is canceled
837 case <-ctx.Done():
838 return
839 }
840 }
841 // Send all common prefixes if any.
842 // NOTE: prefixes are only present if the request is delimited.
843 for _, obj := range result.CommonPrefixes {
844 select {
845 // Send delimited prefixes here.
846 case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
847 // If context is canceled.
848 case <-ctx.Done():
849 return
850 }
851 }
852 // Listing ends if the result is not truncated, return right here.
853 if !result.IsTruncated {
854 return
855 }
856 }
857 }(objectMultipartStatCh)
858 // return.
859 return objectMultipartStatCh
860}
861
862// listMultipartUploadsQuery - (List Multipart Uploads).
863// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
864//
865// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
866 // request parameters :-
867// ---------
868// ?key-marker - Specifies the multipart upload after which listing should begin.
869// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
870// ?delimiter - A delimiter is a character you use to group keys.
871// ?prefix - Limits the response to keys that begin with the specified prefix.
872// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
873func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
874 // Get resources properly escaped and lined up before using them in http request.
875 urlValues := make(url.Values)
876 // Set uploads.
877 urlValues.Set("uploads", "")
878 // Set object key marker.
879 if keyMarker != "" {
880 urlValues.Set("key-marker", keyMarker)
881 }
882 // Set upload id marker.
883 if uploadIDMarker != "" {
884 urlValues.Set("upload-id-marker", uploadIDMarker)
885 }
886
887 // Set object prefix; an empty prefix value is okay.
888 urlValues.Set("prefix", prefix)
889
890 // Set delimiter; an empty delimiter value is okay.
891 urlValues.Set("delimiter", delimiter)
892
893 // Always set encoding-type
894 urlValues.Set("encoding-type", "url")
895
896 // maxUploads should be 1000 or less.
897 if maxUploads > 0 {
898 // Set max-uploads.
899 urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
900 }
901
902 // Execute GET on bucketName to list multipart uploads.
903 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
904 bucketName: bucketName,
905 queryValues: urlValues,
906 contentSHA256Hex: emptySHA256Hex,
907 })
908 defer closeResponse(resp)
909 if err != nil {
910 return ListMultipartUploadsResult{}, err
911 }
912 if resp != nil {
913 if resp.StatusCode != http.StatusOK {
914 return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
915 }
916 }
917 // Decode response body.
918 listMultipartUploadsResult := ListMultipartUploadsResult{}
919 err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
920 if err != nil {
921 return listMultipartUploadsResult, err
922 }
923
924 listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType)
925 if err != nil {
926 return listMultipartUploadsResult, err
927 }
928
929 listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType)
930 if err != nil {
931 return listMultipartUploadsResult, err
932 }
933
934 for i, obj := range listMultipartUploadsResult.Uploads {
935 listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType)
936 if err != nil {
937 return listMultipartUploadsResult, err
938 }
939 }
940
941 for i, obj := range listMultipartUploadsResult.CommonPrefixes {
942 listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType)
943 if err != nil {
944 return listMultipartUploadsResult, err
945 }
946 }
947
948 return listMultipartUploadsResult, nil
949}
950
951 // listObjectParts lists all object parts, iterating until the result is no longer truncated.
952//
953//lint:ignore U1000 Keep this around
954func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
955 // Part number marker for the next batch of request.
956 var nextPartNumberMarker int
957 partsInfo = make(map[int]ObjectPart)
958 for {
959 // Get a list of uploaded parts, a maximum of 1000 per request.
960 listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
961 if err != nil {
962 return nil, err
963 }
964 // Append to parts info.
965 for _, part := range listObjPartsResult.ObjectParts {
966 // Trim off the odd double quotes from the ETag at the beginning and end.
967 part.ETag = trimEtag(part.ETag)
968 partsInfo[part.PartNumber] = part
969 }
970 // Keep part number marker, for the next iteration.
971 nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
972 // Listing ends when the result is not truncated, return right here.
973 if !listObjPartsResult.IsTruncated {
974 break
975 }
976 }
977
978 // Return all the parts.
979 return partsInfo, nil
980}
981
982 // findUploadIDs lists all incomplete uploads and finds the uploadIDs matching the object name.
983func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
984 var uploadIDs []string
985 // Make list incomplete uploads recursive.
986 isRecursive := true
987 // List all incomplete uploads.
988 for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) {
989 if mpUpload.Err != nil {
990 return nil, mpUpload.Err
991 }
992 if objectName == mpUpload.Key {
993 uploadIDs = append(uploadIDs, mpUpload.UploadID)
994 }
995 }
996 // Return all matching upload ids.
997 return uploadIDs, nil
998}
999
1000// listObjectPartsQuery (List Parts query)
1001// - lists some or all (up to 1000) parts that have been uploaded
1002// for a specific multipart upload
1003//
1004// You can use the request parameters as selection criteria to return
1005 // a subset of the parts of a multipart upload, request parameters :-
1006// ---------
1007// ?part-number-marker - Specifies the part after which listing should
1008// begin.
1009// ?max-parts - Maximum parts to be listed per request.
1010func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
1011 // Get resources properly escaped and lined up before using them in http request.
1012 urlValues := make(url.Values)
1013 // Set part number marker.
1014 urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
1015 // Set upload id.
1016 urlValues.Set("uploadId", uploadID)
1017
1018 // maxParts should be 1000 or less.
1019 if maxParts > 0 {
1020 // Set max parts.
1021 urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
1022 }
1023
1024 // Execute GET on objectName to get list of parts.
1025 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
1026 bucketName: bucketName,
1027 objectName: objectName,
1028 queryValues: urlValues,
1029 contentSHA256Hex: emptySHA256Hex,
1030 })
1031 defer closeResponse(resp)
1032 if err != nil {
1033 return ListObjectPartsResult{}, err
1034 }
1035 if resp != nil {
1036 if resp.StatusCode != http.StatusOK {
1037 return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
1038 }
1039 }
1040 // Decode list object parts XML.
1041 listObjectPartsResult := ListObjectPartsResult{}
1042 err = xmlDecoder(resp.Body, &listObjectPartsResult)
1043 if err != nil {
1044 return listObjectPartsResult, err
1045 }
1046 return listObjectPartsResult, nil
1047}
1048
1049// Decode an S3 object name according to the encoding type
1050func decodeS3Name(name, encodingType string) (string, error) {
1051 switch encodingType {
1052 case "url":
1053 return url.QueryUnescape(name)
1054 default:
1055 return name, nil
1056 }
1057}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
new file mode 100644
index 0000000..0c027d5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
@@ -0,0 +1,176 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29)
30
31// objectLegalHold - object legal hold specified in
32// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html
33type objectLegalHold struct {
34 XMLNS string `xml:"xmlns,attr,omitempty"`
35 XMLName xml.Name `xml:"LegalHold"`
36 Status LegalHoldStatus `xml:"Status,omitempty"`
37}
38
39// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call
40type PutObjectLegalHoldOptions struct {
41 VersionID string
42 Status *LegalHoldStatus
43}
44
45// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call
46type GetObjectLegalHoldOptions struct {
47 VersionID string
48}
49
50// LegalHoldStatus - object legal hold status.
51type LegalHoldStatus string
52
53const (
54 // LegalHoldEnabled indicates legal hold is enabled
55 LegalHoldEnabled LegalHoldStatus = "ON"
56
57 // LegalHoldDisabled indicates legal hold is disabled
58 LegalHoldDisabled LegalHoldStatus = "OFF"
59)
60
61func (r LegalHoldStatus) String() string {
62 return string(r)
63}
64
65// IsValid - check whether this legal hold status is valid or not.
66func (r LegalHoldStatus) IsValid() bool {
67 return r == LegalHoldEnabled || r == LegalHoldDisabled
68}
69
70func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) {
71 if status == nil {
72 return nil, fmt.Errorf("Status not set")
73 }
74 if !status.IsValid() {
75 return nil, fmt.Errorf("invalid legal hold status `%v`", status)
76 }
77 legalHold := &objectLegalHold{
78 Status: *status,
79 }
80 return legalHold, nil
81}
82
83// PutObjectLegalHold : sets object legal hold for a given object and versionID.
84func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
85 // Input validation.
86 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
87 return err
88 }
89
90 if err := s3utils.CheckValidObjectName(objectName); err != nil {
91 return err
92 }
93
94 // Get resources properly escaped and lined up before
95 // using them in http request.
96 urlValues := make(url.Values)
97 urlValues.Set("legal-hold", "")
98
99 if opts.VersionID != "" {
100 urlValues.Set("versionId", opts.VersionID)
101 }
102
103 lh, err := newObjectLegalHold(opts.Status)
104 if err != nil {
105 return err
106 }
107
108 lhData, err := xml.Marshal(lh)
109 if err != nil {
110 return err
111 }
112
113 reqMetadata := requestMetadata{
114 bucketName: bucketName,
115 objectName: objectName,
116 queryValues: urlValues,
117 contentBody: bytes.NewReader(lhData),
118 contentLength: int64(len(lhData)),
119 contentMD5Base64: sumMD5Base64(lhData),
120 contentSHA256Hex: sum256Hex(lhData),
121 }
122
123 // Execute PUT Object Legal Hold.
124 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
125 defer closeResponse(resp)
126 if err != nil {
127 return err
128 }
129 if resp != nil {
130 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
131 return httpRespToErrorResponse(resp, bucketName, objectName)
132 }
133 }
134 return nil
135}
136
137 // GetObjectLegalHold gets the legal-hold status of the given object.
138func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
139 // Input validation.
140 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
141 return nil, err
142 }
143
144 if err := s3utils.CheckValidObjectName(objectName); err != nil {
145 return nil, err
146 }
147 urlValues := make(url.Values)
148 urlValues.Set("legal-hold", "")
149
150 if opts.VersionID != "" {
151 urlValues.Set("versionId", opts.VersionID)
152 }
153
154 // Execute GET Object Legal Hold.
155 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
156 bucketName: bucketName,
157 objectName: objectName,
158 queryValues: urlValues,
159 contentSHA256Hex: emptySHA256Hex,
160 })
161 defer closeResponse(resp)
162 if err != nil {
163 return nil, err
164 }
165 if resp != nil {
166 if resp.StatusCode != http.StatusOK {
167 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
168 }
169 }
170 lh := &objectLegalHold{}
171 if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil {
172 return nil, err
173 }
174
175 return &lh.Status, nil
176}
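A sketch of setting and reading back a legal hold with the two calls above, under the same hypothetical client/ctx/name assumptions:

	status := minio.LegalHoldEnabled
	err := client.PutObjectLegalHold(ctx, "mybucket", "myobject", minio.PutObjectLegalHoldOptions{
		Status: &status, // a nil Status is rejected by newObjectLegalHold
	})
	if err != nil {
		log.Fatalln(err)
	}
	current, err := client.GetObjectLegalHold(ctx, "mybucket", "myobject", minio.GetObjectLegalHoldOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("legal hold:", *current) // expected to print "ON"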
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
new file mode 100644
index 0000000..f0a4398
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
@@ -0,0 +1,241 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
32// RetentionMode - object retention mode.
33type RetentionMode string
34
35const (
36 // Governance - governance mode.
37 Governance RetentionMode = "GOVERNANCE"
38
39 // Compliance - compliance mode.
40 Compliance RetentionMode = "COMPLIANCE"
41)
42
43func (r RetentionMode) String() string {
44 return string(r)
45}
46
47// IsValid - check whether this retention mode is valid or not.
48func (r RetentionMode) IsValid() bool {
49 return r == Governance || r == Compliance
50}
51
52// ValidityUnit - retention validity unit.
53type ValidityUnit string
54
55const (
56 // Days - denotes no. of days.
57 Days ValidityUnit = "DAYS"
58
59 // Years - denotes no. of years.
60 Years ValidityUnit = "YEARS"
61)
62
63func (unit ValidityUnit) String() string {
64 return string(unit)
65}
66
67 // isValid - check whether this validity unit is valid or not.
68func (unit ValidityUnit) isValid() bool {
69 return unit == Days || unit == Years
70}
71
72// Retention - bucket level retention configuration.
73type Retention struct {
74 Mode RetentionMode
75 Validity time.Duration
76}
77
78func (r Retention) String() string {
79 return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
80}
81
82// IsEmpty - returns whether retention is empty or not.
83func (r Retention) IsEmpty() bool {
84 return r.Mode == "" || r.Validity == 0
85}
86
87// objectLockConfig - object lock configuration specified in
88// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
89type objectLockConfig struct {
90 XMLNS string `xml:"xmlns,attr,omitempty"`
91 XMLName xml.Name `xml:"ObjectLockConfiguration"`
92 ObjectLockEnabled string `xml:"ObjectLockEnabled"`
93 Rule *struct {
94 DefaultRetention struct {
95 Mode RetentionMode `xml:"Mode"`
96 Days *uint `xml:"Days"`
97 Years *uint `xml:"Years"`
98 } `xml:"DefaultRetention"`
99 } `xml:"Rule,omitempty"`
100}
101
102func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
103 config := &objectLockConfig{
104 ObjectLockEnabled: "Enabled",
105 }
106
107 if mode != nil && validity != nil && unit != nil {
108 if !mode.IsValid() {
109 return nil, fmt.Errorf("invalid retention mode `%v`", mode)
110 }
111
112 if !unit.isValid() {
113 return nil, fmt.Errorf("invalid validity unit `%v`", unit)
114 }
115
116 config.Rule = &struct {
117 DefaultRetention struct {
118 Mode RetentionMode `xml:"Mode"`
119 Days *uint `xml:"Days"`
120 Years *uint `xml:"Years"`
121 } `xml:"DefaultRetention"`
122 }{}
123
124 config.Rule.DefaultRetention.Mode = *mode
125 if *unit == Days {
126 config.Rule.DefaultRetention.Days = validity
127 } else {
128 config.Rule.DefaultRetention.Years = validity
129 }
130
131 return config, nil
132 }
133
134 if mode == nil && validity == nil && unit == nil {
135 return config, nil
136 }
137
138 return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
139}
140
141// SetBucketObjectLockConfig sets the object lock configuration for the given bucket. mode, validity and unit must either all be set or all be nil.
142func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
143 // Input validation.
144 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
145 return err
146 }
147
148 // Get resources properly escaped and lined up before
149 // using them in http request.
150 urlValues := make(url.Values)
151 urlValues.Set("object-lock", "")
152
153 config, err := newObjectLockConfig(mode, validity, unit)
154 if err != nil {
155 return err
156 }
157
158 configData, err := xml.Marshal(config)
159 if err != nil {
160 return err
161 }
162
163 reqMetadata := requestMetadata{
164 bucketName: bucketName,
165 queryValues: urlValues,
166 contentBody: bytes.NewReader(configData),
167 contentLength: int64(len(configData)),
168 contentMD5Base64: sumMD5Base64(configData),
169 contentSHA256Hex: sum256Hex(configData),
170 }
171
172 // Execute PUT bucket object lock configuration.
173 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
174 defer closeResponse(resp)
175 if err != nil {
176 return err
177 }
178 if resp != nil {
179 if resp.StatusCode != http.StatusOK {
180 return httpRespToErrorResponse(resp, bucketName, "")
181 }
182 }
183 return nil
184}
185
186// GetObjectLockConfig gets the object lock configuration of the given bucket.
187func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
188 // Input validation.
189 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
190 return "", nil, nil, nil, err
191 }
192
193 urlValues := make(url.Values)
194 urlValues.Set("object-lock", "")
195
196	// Execute GET on bucket to fetch the object lock configuration.
197 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
198 bucketName: bucketName,
199 queryValues: urlValues,
200 contentSHA256Hex: emptySHA256Hex,
201 })
202 defer closeResponse(resp)
203 if err != nil {
204 return "", nil, nil, nil, err
205 }
206 if resp != nil {
207 if resp.StatusCode != http.StatusOK {
208 return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
209 }
210 }
211 config := &objectLockConfig{}
212 if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
213 return "", nil, nil, nil, err
214 }
215
216 if config.Rule != nil {
217 mode = &config.Rule.DefaultRetention.Mode
218 if config.Rule.DefaultRetention.Days != nil {
219 validity = config.Rule.DefaultRetention.Days
220 days := Days
221 unit = &days
222 } else {
223 validity = config.Rule.DefaultRetention.Years
224 years := Years
225 unit = &years
226 }
227 return config.ObjectLockEnabled, mode, validity, unit, nil
228 }
229 return config.ObjectLockEnabled, nil, nil, nil, nil
230}
231
232// GetBucketObjectLockConfig gets the object lock configuration of the given bucket.
233func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
234 _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
235 return mode, validity, unit, err
236}
237
238// SetObjectLockConfig sets the object lock configuration for the given bucket. mode, validity and unit must either all be set or all be nil.
239func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
240 return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
241}
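The two calls above pair naturally: SetBucketObjectLockConfig writes a bucket's default retention rule, and GetObjectLockConfig reads it back. A minimal usage sketch follows; the endpoint, credentials, bucket name and the client variable are illustrative assumptions, not part of the vendored source.

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Client setup; endpoint and keys are placeholders.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()

	// Default retention of GOVERNANCE for 30 days; mode, validity and
	// unit must be set together (all nil clears the default rule).
	mode := minio.Governance
	validity := uint(30)
	unit := minio.Days
	if err := client.SetBucketObjectLockConfig(ctx, "my-locked-bucket", &mode, &validity, &unit); err != nil {
		log.Fatalln(err)
	}

	// Read the configuration back.
	enabled, m, v, u, err := client.GetObjectLockConfig(ctx, "my-locked-bucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(enabled, m, v, u)
}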
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
new file mode 100644
index 0000000..b29cb1f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
@@ -0,0 +1,165 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
32// objectRetention - object retention specified in
33// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
34type objectRetention struct {
35 XMLNS string `xml:"xmlns,attr,omitempty"`
36 XMLName xml.Name `xml:"Retention"`
37 Mode RetentionMode `xml:"Mode,omitempty"`
38 RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
39}
40
41func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
42 objectRetention := &objectRetention{}
43
44 if date != nil && !date.IsZero() {
45 objectRetention.RetainUntilDate = date
46 }
47 if mode != nil {
48 if !mode.IsValid() {
49 return nil, fmt.Errorf("invalid retention mode `%v`", mode)
50 }
51 objectRetention.Mode = *mode
52 }
53
54 return objectRetention, nil
55}
56
57// PutObjectRetentionOptions represents options specified by user for PutObject call
58type PutObjectRetentionOptions struct {
59 GovernanceBypass bool
60 Mode *RetentionMode
61 RetainUntilDate *time.Time
62 VersionID string
63}
64
65// PutObjectRetention sets object retention for a given object and versionID.
66func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
67 // Input validation.
68 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
69 return err
70 }
71
72 if err := s3utils.CheckValidObjectName(objectName); err != nil {
73 return err
74 }
75
76 // Get resources properly escaped and lined up before
77 // using them in http request.
78 urlValues := make(url.Values)
79 urlValues.Set("retention", "")
80
81 if opts.VersionID != "" {
82 urlValues.Set("versionId", opts.VersionID)
83 }
84
85 retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
86 if err != nil {
87 return err
88 }
89
90 retentionData, err := xml.Marshal(retention)
91 if err != nil {
92 return err
93 }
94
95 // Build headers.
96 headers := make(http.Header)
97
98 if opts.GovernanceBypass {
99		// Set the bypass governance retention header
100 headers.Set(amzBypassGovernance, "true")
101 }
102
103 reqMetadata := requestMetadata{
104 bucketName: bucketName,
105 objectName: objectName,
106 queryValues: urlValues,
107 contentBody: bytes.NewReader(retentionData),
108 contentLength: int64(len(retentionData)),
109 contentMD5Base64: sumMD5Base64(retentionData),
110 contentSHA256Hex: sum256Hex(retentionData),
111 customHeader: headers,
112 }
113
114 // Execute PUT Object Retention.
115 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
116 defer closeResponse(resp)
117 if err != nil {
118 return err
119 }
120 if resp != nil {
121 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
122 return httpRespToErrorResponse(resp, bucketName, objectName)
123 }
124 }
125 return nil
126}
127
128// GetObjectRetention gets the retention of the given object.
129func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
130 // Input validation.
131 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
132 return nil, nil, err
133 }
134
135 if err := s3utils.CheckValidObjectName(objectName); err != nil {
136 return nil, nil, err
137 }
138 urlValues := make(url.Values)
139 urlValues.Set("retention", "")
140 if versionID != "" {
141 urlValues.Set("versionId", versionID)
142 }
143	// Execute GET on object to fetch its retention configuration.
144 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
145 bucketName: bucketName,
146 objectName: objectName,
147 queryValues: urlValues,
148 contentSHA256Hex: emptySHA256Hex,
149 })
150 defer closeResponse(resp)
151 if err != nil {
152 return nil, nil, err
153 }
154 if resp != nil {
155 if resp.StatusCode != http.StatusOK {
156 return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
157 }
158 }
159 retention := &objectRetention{}
160 if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
161 return nil, nil, err
162 }
163
164 return &retention.Mode, retention.RetainUntilDate, nil
165}
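A short sketch of the retention calls above, reusing the assumed client and ctx from the earlier object-lock example (bucket and object names are again illustrative; imports such as time elided):

// Place the latest object version under COMPLIANCE retention for 30 days.
mode := minio.Compliance
retainUntil := time.Now().UTC().AddDate(0, 0, 30)
err := client.PutObjectRetention(ctx, "my-locked-bucket", "invoice.pdf", minio.PutObjectRetentionOptions{
	Mode:            &mode,
	RetainUntilDate: &retainUntil,
})
if err != nil {
	log.Fatalln(err)
}

// Inspect the retention of the latest version ("" means no versionId).
gotMode, gotDate, err := client.GetObjectRetention(ctx, "my-locked-bucket", "invoice.pdf", "")
if err != nil {
	log.Fatalln(err)
}
log.Println(*gotMode, gotDate)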
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
new file mode 100644
index 0000000..6623e26
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -0,0 +1,177 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25 "net/url"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/tags"
29)
30
31// PutObjectTaggingOptions holds an object version id
32// to update tag(s) of a specific object version
33type PutObjectTaggingOptions struct {
34 VersionID string
35 Internal AdvancedObjectTaggingOptions
36}
37
38// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
39type AdvancedObjectTaggingOptions struct {
40 ReplicationProxyRequest string
41}
42
43// PutObjectTagging replaces or creates object tag(s) and can target
44// a specific object version in a versioned bucket.
45func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
46 // Input validation.
47 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
48 return err
49 }
50
51 // Get resources properly escaped and lined up before
52 // using them in http request.
53 urlValues := make(url.Values)
54 urlValues.Set("tagging", "")
55
56 if opts.VersionID != "" {
57 urlValues.Set("versionId", opts.VersionID)
58 }
59 headers := make(http.Header, 0)
60 if opts.Internal.ReplicationProxyRequest != "" {
61 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
62 }
63 reqBytes, err := xml.Marshal(otags)
64 if err != nil {
65 return err
66 }
67
68 reqMetadata := requestMetadata{
69 bucketName: bucketName,
70 objectName: objectName,
71 queryValues: urlValues,
72 contentBody: bytes.NewReader(reqBytes),
73 contentLength: int64(len(reqBytes)),
74 contentMD5Base64: sumMD5Base64(reqBytes),
75 customHeader: headers,
76 }
77
78	// Execute PUT to set object tag(s).
79 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
80 defer closeResponse(resp)
81 if err != nil {
82 return err
83 }
84 if resp != nil {
85 if resp.StatusCode != http.StatusOK {
86 return httpRespToErrorResponse(resp, bucketName, objectName)
87 }
88 }
89 return nil
90}
91
92// GetObjectTaggingOptions holds the object version ID
93// to fetch the tagging key/value pairs
94type GetObjectTaggingOptions struct {
95 VersionID string
96 Internal AdvancedObjectTaggingOptions
97}
98
99// GetObjectTagging fetches object tag(s) with options to target
100// a specific object version in a versioned bucket.
101func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
102 // Get resources properly escaped and lined up before
103 // using them in http request.
104 urlValues := make(url.Values)
105 urlValues.Set("tagging", "")
106
107 if opts.VersionID != "" {
108 urlValues.Set("versionId", opts.VersionID)
109 }
110 headers := make(http.Header, 0)
111 if opts.Internal.ReplicationProxyRequest != "" {
112 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
113 }
114 // Execute GET on object to get object tag(s)
115 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
116 bucketName: bucketName,
117 objectName: objectName,
118 queryValues: urlValues,
119 customHeader: headers,
120 })
121
122 defer closeResponse(resp)
123 if err != nil {
124 return nil, err
125 }
126
127 if resp != nil {
128 if resp.StatusCode != http.StatusOK {
129 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
130 }
131 }
132
133 return tags.ParseObjectXML(resp.Body)
134}
135
136// RemoveObjectTaggingOptions holds the version id of the object whose tag(s) will be removed
137type RemoveObjectTaggingOptions struct {
138 VersionID string
139 Internal AdvancedObjectTaggingOptions
140}
141
142// RemoveObjectTagging removes object tag(s), with options to target a specific object
143// version in a versioned bucket
144func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
145 // Get resources properly escaped and lined up before
146 // using them in http request.
147 urlValues := make(url.Values)
148 urlValues.Set("tagging", "")
149
150 if opts.VersionID != "" {
151 urlValues.Set("versionId", opts.VersionID)
152 }
153 headers := make(http.Header, 0)
154 if opts.Internal.ReplicationProxyRequest != "" {
155 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
156 }
157 // Execute DELETE on object to remove object tag(s)
158 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
159 bucketName: bucketName,
160 objectName: objectName,
161 queryValues: urlValues,
162 customHeader: headers,
163 })
164
165 defer closeResponse(resp)
166 if err != nil {
167 return err
168 }
169
170 if resp != nil {
171 // S3 returns "204 No content" after Object tag deletion.
172 if resp.StatusCode != http.StatusNoContent {
173 return httpRespToErrorResponse(resp, bucketName, objectName)
174 }
175 }
176 return err
177}
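A sketch of the tagging round trip above, reusing the same assumed client and ctx; it also leans on the NewTags and ToMap helpers from the imported pkg/tags package (an assumption about that helper's API):

// Build a validated tag set (true = object tags, not bucket tags).
otags, err := tags.NewTags(map[string]string{"project": "alpha", "tier": "hot"}, true)
if err != nil {
	log.Fatalln(err)
}
if err := client.PutObjectTagging(ctx, "my-bucket", "invoice.pdf", otags, minio.PutObjectTaggingOptions{}); err != nil {
	log.Fatalln(err)
}

// Fetch the tags back as a map.
got, err := client.GetObjectTagging(ctx, "my-bucket", "invoice.pdf", minio.GetObjectTaggingOptions{})
if err != nil {
	log.Fatalln(err)
}
log.Println(got.ToMap())

// Remove all tags from the latest version (S3 answers 204 No Content).
if err := client.RemoveObjectTagging(ctx, "my-bucket", "invoice.pdf", minio.RemoveObjectTaggingOptions{}); err != nil {
	log.Fatalln(err)
}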
diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go
new file mode 100644
index 0000000..9e85f81
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go
@@ -0,0 +1,228 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "errors"
23 "net/http"
24 "net/url"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/signer"
29)
30
31// presignURL - Returns a presigned URL for an input 'method'.
32// Expiry can be at most 7 days (604800 seconds) and at least 1 second.
33func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
34 // Input validation.
35 if method == "" {
36 return nil, errInvalidArgument("method cannot be empty.")
37 }
38 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
39 return nil, err
40 }
41 if err = isValidExpiry(expires); err != nil {
42 return nil, err
43 }
44
45 // Convert expires into seconds.
46 expireSeconds := int64(expires / time.Second)
47 reqMetadata := requestMetadata{
48 presignURL: true,
49 bucketName: bucketName,
50 objectName: objectName,
51 expires: expireSeconds,
52 queryValues: reqParams,
53 extraPresignHeader: extraHeaders,
54 }
55
56 // Instantiate a new request.
57 // Since expires is set newRequest will presign the request.
58 var req *http.Request
59 if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
60 return nil, err
61 }
62 return req.URL, nil
63}
64
65// PresignedGetObject - Returns a presigned URL to access object
66// data without credentials. The URL can have an expiry of at most
67// 7 days and at least 1 second. Additionally you can override
68// a set of response headers using the query parameters.
69func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
70 if err = s3utils.CheckValidObjectName(objectName); err != nil {
71 return nil, err
72 }
73 return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil)
74}
75
76// PresignedHeadObject - Returns a presigned URL to access
77// object metadata without credentials. The URL can have an expiry of
78// at most 7 days and at least 1 second. Additionally you can override
79// a set of response headers using the query parameters.
80func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
81 if err = s3utils.CheckValidObjectName(objectName); err != nil {
82 return nil, err
83 }
84 return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
85}
86
87// PresignedPutObject - Returns a presigned URL to upload an object
88// without credentials. The URL can have an expiry of at most 7 days
89// and at least 1 second.
90func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) {
91 if err = s3utils.CheckValidObjectName(objectName); err != nil {
92 return nil, err
93 }
94 return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
95}
96
97// PresignHeader - similar to Presign() but allows including HTTP headers that
98// will be used to build the signature. The request using the resulting URL will
99// need to have the exact same headers to be added for signature validation to
100// pass.
101//
102// FIXME: The extra header parameter should be included in Presign() in the next
103// major version bump, and this function should then be deprecated.
104func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
105 return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
106}
107
108// Presign - returns a presigned URL for any http method of your choice,
109// along with custom request params and extra signed headers. The URL can
110// have an expiry of at most 7 days and at least 1 second.
111func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
112 return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
113}
114
115// PresignedPostPolicy - Returns a POST URL and form data to upload an object.
116func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
117 // Validate input arguments.
118 if p.expiration.IsZero() {
119 return nil, nil, errors.New("Expiration time must be specified")
120 }
121 if _, ok := p.formData["key"]; !ok {
122 return nil, nil, errors.New("object key must be specified")
123 }
124 if _, ok := p.formData["bucket"]; !ok {
125 return nil, nil, errors.New("bucket name must be specified")
126 }
127
128 bucketName := p.formData["bucket"]
129 // Fetch the bucket location.
130 location, err := c.getBucketLocation(ctx, bucketName)
131 if err != nil {
132 return nil, nil, err
133 }
134
135 isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
136
137 u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
138 if err != nil {
139 return nil, nil, err
140 }
141
142 // Get credentials from the configured credentials provider.
143 credValues, err := c.credsProvider.Get()
144 if err != nil {
145 return nil, nil, err
146 }
147
148 var (
149 signerType = credValues.SignerType
150 sessionToken = credValues.SessionToken
151 accessKeyID = credValues.AccessKeyID
152 secretAccessKey = credValues.SecretAccessKey
153 )
154
155 if signerType.IsAnonymous() {
156 return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials")
157 }
158
159 // Keep time.
160 t := time.Now().UTC()
161	// Handle signature version '2' here.
162 if signerType.IsV2() {
163 policyBase64 := p.base64()
164 p.formData["policy"] = policyBase64
165 // For Google endpoint set this value to be 'GoogleAccessId'.
166 if s3utils.IsGoogleEndpoint(*c.endpointURL) {
167 p.formData["GoogleAccessId"] = accessKeyID
168 } else {
169 // For all other endpoints set this value to be 'AWSAccessKeyId'.
170 p.formData["AWSAccessKeyId"] = accessKeyID
171 }
172 // Sign the policy.
173 p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
174 return u, p.formData, nil
175 }
176
177 // Add date policy.
178 if err = p.addNewPolicy(policyCondition{
179 matchType: "eq",
180 condition: "$x-amz-date",
181 value: t.Format(iso8601DateFormat),
182 }); err != nil {
183 return nil, nil, err
184 }
185
186 // Add algorithm policy.
187 if err = p.addNewPolicy(policyCondition{
188 matchType: "eq",
189 condition: "$x-amz-algorithm",
190 value: signV4Algorithm,
191 }); err != nil {
192 return nil, nil, err
193 }
194
195 // Add a credential policy.
196 credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3)
197 if err = p.addNewPolicy(policyCondition{
198 matchType: "eq",
199 condition: "$x-amz-credential",
200 value: credential,
201 }); err != nil {
202 return nil, nil, err
203 }
204
205 if sessionToken != "" {
206 if err = p.addNewPolicy(policyCondition{
207 matchType: "eq",
208 condition: "$x-amz-security-token",
209 value: sessionToken,
210 }); err != nil {
211 return nil, nil, err
212 }
213 }
214
215 // Get base64 encoded policy.
216 policyBase64 := p.base64()
217
218 // Fill in the form data.
219 p.formData["policy"] = policyBase64
220 p.formData["x-amz-algorithm"] = signV4Algorithm
221 p.formData["x-amz-credential"] = credential
222 p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
223 if sessionToken != "" {
224 p.formData["x-amz-security-token"] = sessionToken
225 }
226 p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
227 return u, p.formData, nil
228}
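A sketch of the presigning calls above, with the same assumed client and ctx (bucket and object names illustrative):

// Presign a GET on "report.pdf" valid for 15 minutes, overriding the
// response Content-Disposition so browsers download rather than render.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)
u, err := client.PresignedGetObject(ctx, "my-bucket", "report.pdf", 15*time.Minute, reqParams)
if err != nil {
	log.Fatalln(err)
}
log.Println("share:", u.String())

// Presign an upload slot valid for 1 hour; anyone holding the URL can
// PUT the object body until it expires.
putURL, err := client.PresignedPutObject(ctx, "my-bucket", "incoming/upload.bin", time.Hour)
if err != nil {
	log.Fatalln(err)
}
log.Println("PUT to:", putURL.String())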
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
new file mode 100644
index 0000000..7376669
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
@@ -0,0 +1,123 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// Bucket operations
30func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
31 // Validate the input arguments.
32 if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
33 return err
34 }
35
36 err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
37 if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
38 if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
39 err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
40 }
41 }
42 return err
43}
44
45func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) {
46 defer func() {
47 // Save the location into cache on a successful makeBucket response.
48 if err == nil {
49 c.bucketLocCache.Set(bucketName, location)
50 }
51 }()
52
53	// If location is empty, treat it as the default region 'us-east-1'.
54 if location == "" {
55 location = "us-east-1"
56		// For clients configured with a custom region, default
57		// to that region instead of 'us-east-1'.
58 if c.region != "" {
59 location = c.region
60 }
61 }
62 // PUT bucket request metadata.
63 reqMetadata := requestMetadata{
64 bucketName: bucketName,
65 bucketLocation: location,
66 }
67
68 if objectLockEnabled {
69 headers := make(http.Header)
70 headers.Add("x-amz-bucket-object-lock-enabled", "true")
71 reqMetadata.customHeader = headers
72 }
73
74 // If location is not 'us-east-1' create bucket location config.
75 if location != "us-east-1" && location != "" {
76 createBucketConfig := createBucketConfiguration{}
77 createBucketConfig.Location = location
78 var createBucketConfigBytes []byte
79 createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
80 if err != nil {
81 return err
82 }
83 reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
84 reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
85 reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
86 reqMetadata.contentLength = int64(len(createBucketConfigBytes))
87 }
88
89 // Execute PUT to create a new bucket.
90 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
91 defer closeResponse(resp)
92 if err != nil {
93 return err
94 }
95
96 if resp != nil {
97 if resp.StatusCode != http.StatusOK {
98 return httpRespToErrorResponse(resp, bucketName, "")
99 }
100 }
101
102 // Success.
103 return nil
104}
105
106// MakeBucketOptions holds all options to tweak bucket creation
107type MakeBucketOptions struct {
108 // Bucket location
109 Region string
110 // Enable object locking
111 ObjectLocking bool
112}
113
114// MakeBucket creates a new bucket named bucketName, with a context to control cancellations and timeouts.
115//
116// Location is an optional argument; by default all buckets are
117// created in the US Standard Region.
118//
119// For more supported Amazon S3 regions, see http://docs.aws.amazon.com/general/latest/gr/rande.html
120// For more supported Google Cloud Storage regions, see https://cloud.google.com/storage/docs/bucket-locations
121func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
122 return c.makeBucket(ctx, bucketName, opts)
123}
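A sketch of bucket creation with the options above, using the same assumed client and ctx; note that object locking must be requested at creation time, it cannot be enabled on an existing bucket:

err := client.MakeBucket(ctx, "archive-bucket", minio.MakeBucketOptions{
	Region:        "us-east-1",
	ObjectLocking: true,
})
if err != nil {
	log.Fatalln(err)
}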
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
new file mode 100644
index 0000000..9ccb97c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -0,0 +1,149 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "math"
24 "os"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29const nullVersionID = "null"
30
31// Verify if reader is *minio.Object
32func isObject(reader io.Reader) (ok bool) {
33 _, ok = reader.(*Object)
34 return
35}
36
37// Verify if reader is a generic ReaderAt
38func isReadAt(reader io.Reader) (ok bool) {
39 var v *os.File
40 v, ok = reader.(*os.File)
41 if ok {
42		// Stdin, Stdout and Stderr are all of type *os.File,
43		// which also happens to be io.ReaderAt compatible;
44		// we need special conditions for them to
45		// be ignored by this function.
46 for _, f := range []string{
47 "/dev/stdin",
48 "/dev/stdout",
49 "/dev/stderr",
50 } {
51 if f == v.Name() {
52 ok = false
53 break
54 }
55 }
56 } else {
57 _, ok = reader.(io.ReaderAt)
58 }
59 return
60}
61
62// OptimalPartInfo - calculate the optimal part info for a given
63// object size.
64//
65// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
66// object storage it will have the following parameters as constants.
67//
68// maxPartsCount - 10000
69// minPartSize - 16MiB
70// maxMultipartPutObjectSize - 5TiB
71func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
72	// If object size is '-1', set it to 5TiB.
73 var unknownSize bool
74 if objectSize == -1 {
75 unknownSize = true
76 objectSize = maxMultipartPutObjectSize
77 }
78
79 // object size is larger than supported maximum.
80 if objectSize > maxMultipartPutObjectSize {
81 err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
82 return
83 }
84
85 var partSizeFlt float64
86 if configuredPartSize > 0 {
87 if int64(configuredPartSize) > objectSize {
88 err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
89 return
90 }
91
92 if !unknownSize {
93 if objectSize > (int64(configuredPartSize) * maxPartsCount) {
94 err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
95 return
96 }
97 }
98
99 if configuredPartSize < absMinPartSize {
100 err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
101 return
102 }
103
104 if configuredPartSize > maxPartSize {
105 err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
106 return
107 }
108
109 partSizeFlt = float64(configuredPartSize)
110 if unknownSize {
111 // If input has unknown size and part size is configured
112 // keep it to maximum allowed as per 10000 parts.
113 objectSize = int64(configuredPartSize) * maxPartsCount
114 }
115 } else {
116 configuredPartSize = minPartSize
117 // Use floats for part size for all calculations to avoid
118 // overflows during float64 to int64 conversions.
119 partSizeFlt = float64(objectSize / maxPartsCount)
120 partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
121 }
122
123 // Total parts count.
124 totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
125 // Part size.
126 partSize = int64(partSizeFlt)
127 // Last part size.
128 lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
129 return totalPartsCount, partSize, lastPartSize, nil
130}
131
132// getUploadID - fetch upload id if already present for an object name
133// or initiate a new request to fetch a new upload id.
134func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
135 // Input validation.
136 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
137 return "", err
138 }
139 if err := s3utils.CheckValidObjectName(objectName); err != nil {
140 return "", err
141 }
142
143 // Initiate multipart upload for an object.
144 initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
145 if err != nil {
146 return "", err
147 }
148 return initMultipartUploadResult.UploadID, nil
149}
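OptimalPartInfo is exported, so the part layout can be computed directly. For example, a 5 GiB object with no configured part size works out, under the constants above (10000 parts maximum, 16 MiB minimum part size), to 320 parts of 16 MiB each:

totalParts, partSize, lastPartSize, err := minio.OptimalPartInfo(5<<30, 0)
if err != nil {
	log.Fatalln(err)
}
log.Println(totalParts, partSize, lastPartSize) // 320 16777216 16777216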
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
new file mode 100644
index 0000000..0ae9142
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -0,0 +1,164 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "encoding/json"
23 "errors"
24 "io"
25 "mime/multipart"
26 "net/http"
27 "strconv"
28 "strings"
29 "time"
30
31 "github.com/minio/minio-go/v7/pkg/encrypt"
32)
33
34// PutObjectFanOutEntry is the per-object fan-out entry metadata
35type PutObjectFanOutEntry struct {
36 Key string `json:"key"`
37 UserMetadata map[string]string `json:"metadata,omitempty"`
38 UserTags map[string]string `json:"tags,omitempty"`
39 ContentType string `json:"contentType,omitempty"`
40 ContentEncoding string `json:"contentEncoding,omitempty"`
41 ContentDisposition string `json:"contentDisposition,omitempty"`
42 ContentLanguage string `json:"contentLanguage,omitempty"`
43 CacheControl string `json:"cacheControl,omitempty"`
44 Retention RetentionMode `json:"retention,omitempty"`
45 RetainUntilDate *time.Time `json:"retainUntil,omitempty"`
46}
47
48// PutObjectFanOutRequest is the request structure sent
49// to the server to fan out the stream to multiple objects.
50type PutObjectFanOutRequest struct {
51 Entries []PutObjectFanOutEntry
52 Checksum Checksum
53 SSE encrypt.ServerSide
54}
55
56// PutObjectFanOutResponse is the response structure returned
57// by the server upon success or failure for each fanned-out
58// object key. Additionally, this response carries the ETag,
59// VersionID and LastModified for each object fan-out.
60type PutObjectFanOutResponse struct {
61 Key string `json:"key"`
62 ETag string `json:"etag,omitempty"`
63 VersionID string `json:"versionId,omitempty"`
64 LastModified *time.Time `json:"lastModified,omitempty"`
65 Error string `json:"error,omitempty"`
66}
67
68// PutObjectFanOut - is a variant of PutObject: instead of writing a single object from a single
69// stream, multiple objects are written, defined via the entries of a PutObjectFanOutRequest. Each
70// entry carries an object key name and its relevant metadata, if any. `Key` is
71// mandatory; the rest of the options in each entry are optional.
72func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
73 if len(fanOutReq.Entries) == 0 {
74 return nil, errInvalidArgument("fan out requests cannot be empty")
75 }
76
77 policy := NewPostPolicy()
78 policy.SetBucket(bucket)
79 policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
80
81 // Expires in 15 minutes.
82 policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
83
84 // Set encryption headers if any.
85 policy.SetEncryption(fanOutReq.SSE)
86
87 // Set checksum headers if any.
88 policy.SetChecksum(fanOutReq.Checksum)
89
90 url, formData, err := c.PresignedPostPolicy(ctx, policy)
91 if err != nil {
92 return nil, err
93 }
94
95 r, w := io.Pipe()
96
97 req, err := http.NewRequest(http.MethodPost, url.String(), r)
98 if err != nil {
99 w.Close()
100 return nil, err
101 }
102
103 var b strings.Builder
104 enc := json.NewEncoder(&b)
105 for _, req := range fanOutReq.Entries {
106 if req.Key == "" {
107 w.Close()
108			return nil, errors.New("PutObjectFanOutEntry.Key is mandatory and cannot be empty")
109 }
110 if err = enc.Encode(&req); err != nil {
111 w.Close()
112 return nil, err
113 }
114 }
115
116 mwriter := multipart.NewWriter(w)
117 req.Header.Add("Content-Type", mwriter.FormDataContentType())
118
119 go func() {
120 defer w.Close()
121 defer mwriter.Close()
122
123 for k, v := range formData {
124 if err := mwriter.WriteField(k, v); err != nil {
125 return
126 }
127 }
128
129 if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil {
130 return
131 }
132
133 mw, err := mwriter.CreateFormFile("file", "fanout-content")
134 if err != nil {
135 return
136 }
137
138 if _, err = io.Copy(mw, fanOutData); err != nil {
139 return
140 }
141 }()
142
143 resp, err := c.do(req)
144 if err != nil {
145 return nil, err
146 }
147 defer closeResponse(resp)
148
149 if resp.StatusCode != http.StatusOK {
150 return nil, httpRespToErrorResponse(resp, bucket, "fanout-content")
151 }
152
153 dec := json.NewDecoder(resp.Body)
154 fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries))
155 for dec.More() {
156 var m PutObjectFanOutResponse
157 if err = dec.Decode(&m); err != nil {
158 return nil, err
159 }
160 fanOutResp = append(fanOutResp, m)
161 }
162
163 return fanOutResp, nil
164}
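Fan-out is a MinIO-specific extension (note the x-minio-fanout-list form field above). A sketch with the same assumed client and ctx:

// Fan one stream out to two keys in a single POST; per-key outcomes,
// including per-key errors, come back in the response slice.
src := strings.NewReader(`{"hello":"world"}`)
resps, err := client.PutObjectFanOut(ctx, "my-bucket", src, minio.PutObjectFanOutRequest{
	Entries: []minio.PutObjectFanOutEntry{
		{Key: "copies/a.json"},
		{Key: "copies/b.json", UserTags: map[string]string{"team": "ops"}},
	},
})
if err != nil {
	log.Fatalln(err)
}
for _, r := range resps {
	if r.Error != "" {
		log.Println("failed:", r.Key, r.Error)
		continue
	}
	log.Println("wrote:", r.Key, r.ETag)
}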
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
new file mode 100644
index 0000000..4d29dfc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
@@ -0,0 +1,64 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "mime"
23 "os"
24 "path/filepath"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
30func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return UploadInfo{}, err
34 }
35 if err := s3utils.CheckValidObjectName(objectName); err != nil {
36 return UploadInfo{}, err
37 }
38
39 // Open the referenced file.
40 fileReader, err := os.Open(filePath)
41	// If there is any error, fail quickly here.
42 if err != nil {
43 return UploadInfo{}, err
44 }
45 defer fileReader.Close()
46
47 // Save the file stat.
48 fileStat, err := fileReader.Stat()
49 if err != nil {
50 return UploadInfo{}, err
51 }
52
53 // Save the file size.
54 fileSize := fileStat.Size()
55
56 // Set contentType based on filepath extension if not given or default
57 // value of "application/octet-stream" if the extension has no associated type.
58 if opts.ContentType == "" {
59 if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
60 opts.ContentType = "application/octet-stream"
61 }
62 }
63 return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts)
64}
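A sketch of the file upload helper above, same assumed client and ctx; the content type is inferred from the file extension when the options leave it empty:

info, err := client.FPutObject(ctx, "my-bucket", "backups/archive.tar.gz",
	"/tmp/archive.tar.gz", minio.PutObjectOptions{})
if err != nil {
	log.Fatalln(err)
}
log.Println("uploaded", info.Size, "bytes, etag:", info.ETag)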
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
new file mode 100644
index 0000000..5f117af
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -0,0 +1,465 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "encoding/hex"
25 "encoding/xml"
26 "fmt"
27 "hash/crc32"
28 "io"
29 "net/http"
30 "net/url"
31 "sort"
32 "strconv"
33 "strings"
34
35 "github.com/google/uuid"
36 "github.com/minio/minio-go/v7/pkg/encrypt"
37 "github.com/minio/minio-go/v7/pkg/s3utils"
38)
39
40func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
41 opts PutObjectOptions,
42) (info UploadInfo, err error) {
43 info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
44 if err != nil {
45 errResp := ToErrorResponse(err)
46		// Verify whether multipart functionality is available; if not,
47		// fall back to a single PutObject operation.
48 if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
49 // Verify if size of reader is greater than '5GiB'.
50 if size > maxSinglePutObjectSize {
51 return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
52 }
53 // Fall back to uploading as single PutObject operation.
54 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
55 }
56 }
57 return info, err
58}
59
60func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
61 // Input validation.
62 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
63 return UploadInfo{}, err
64 }
65 if err = s3utils.CheckValidObjectName(objectName); err != nil {
66 return UploadInfo{}, err
67 }
68
69	// Total data read and written to server. Should be equal to
70	// 'size' at the end of the call.
71 var totalUploadedSize int64
72
73 // Complete multipart upload.
74 var complMultipartUpload completeMultipartUpload
75
76 // Calculate the optimal parts info for a given size.
77 totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
78 if err != nil {
79 return UploadInfo{}, err
80 }
81
82 // Choose hash algorithms to be calculated by hashCopyN,
83 // avoid sha256 with non-v4 signature request or
84 // HTTPS connection.
85 hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
86 if len(hashSums) == 0 {
87 if opts.UserMetadata == nil {
88 opts.UserMetadata = make(map[string]string, 1)
89 }
90 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
91 }
92
93 // Initiate a new multipart upload.
94 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
95 if err != nil {
96 return UploadInfo{}, err
97 }
98 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
99
100 defer func() {
101 if err != nil {
102 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
103 }
104 }()
105
106 // Part number always starts with '1'.
107 partNumber := 1
108
109 // Initialize parts uploaded map.
110 partsInfo := make(map[int]ObjectPart)
111
112 // Create a buffer.
113 buf := make([]byte, partSize)
114
115 // Create checksums
116 // CRC32C is ~50% faster on AMD64 @ 30GB/s
117 var crcBytes []byte
118 customHeader := make(http.Header)
119 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
120 for partNumber <= totalPartsCount {
121 length, rErr := readFull(reader, buf)
122 if rErr == io.EOF && partNumber > 1 {
123 break
124 }
125
126 if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
127 return UploadInfo{}, rErr
128 }
129
130		// Calculate hash sums over the part data just read.
131 for k, v := range hashAlgos {
132 v.Write(buf[:length])
133 hashSums[k] = v.Sum(nil)
134 v.Close()
135 }
136
137 // Update progress reader appropriately to the latest offset
138 // as we read from the source.
139 rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
140
141		// Checksums.
142 var (
143 md5Base64 string
144 sha256Hex string
145 )
146
147 if hashSums["md5"] != nil {
148 md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
149 }
150 if hashSums["sha256"] != nil {
151 sha256Hex = hex.EncodeToString(hashSums["sha256"])
152 }
153 if len(hashSums) == 0 {
154 crc.Reset()
155 crc.Write(buf[:length])
156 cSum := crc.Sum(nil)
157 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
158 crcBytes = append(crcBytes, cSum...)
159 }
160
161 p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
162 // Proceed to upload the part.
163 objPart, uerr := c.uploadPart(ctx, p)
164 if uerr != nil {
165 return UploadInfo{}, uerr
166 }
167
168 // Save successfully uploaded part metadata.
169 partsInfo[partNumber] = objPart
170
171 // Save successfully uploaded size.
172 totalUploadedSize += int64(length)
173
174 // Increment part number.
175 partNumber++
176
177		// For unknown size, break away once we read EOF.
178		// We do not have to upload up to totalPartsCount parts.
179 if rErr == io.EOF {
180 break
181 }
182 }
183
184 // Loop over total uploaded parts to save them in
185 // Parts array before completing the multipart request.
186 for i := 1; i < partNumber; i++ {
187 part, ok := partsInfo[i]
188 if !ok {
189 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
190 }
191 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
192 ETag: part.ETag,
193 PartNumber: part.PartNumber,
194 ChecksumCRC32: part.ChecksumCRC32,
195 ChecksumCRC32C: part.ChecksumCRC32C,
196 ChecksumSHA1: part.ChecksumSHA1,
197 ChecksumSHA256: part.ChecksumSHA256,
198 })
199 }
200
201 // Sort all completed parts.
202 sort.Sort(completedParts(complMultipartUpload.Parts))
203 opts = PutObjectOptions{
204 ServerSideEncryption: opts.ServerSideEncryption,
205 }
206 if len(crcBytes) > 0 {
207 // Add hash of hashes.
208 crc.Reset()
209 crc.Write(crcBytes)
210 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
211 }
212 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
213 if err != nil {
214 return UploadInfo{}, err
215 }
216
217 uploadInfo.Size = totalUploadedSize
218 return uploadInfo, nil
219}
220
221// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
222func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
223 // Input validation.
224 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
225 return initiateMultipartUploadResult{}, err
226 }
227 if err := s3utils.CheckValidObjectName(objectName); err != nil {
228 return initiateMultipartUploadResult{}, err
229 }
230
231 // Initialize url queries.
232 urlValues := make(url.Values)
233 urlValues.Set("uploads", "")
234
235 if opts.Internal.SourceVersionID != "" {
236 if opts.Internal.SourceVersionID != nullVersionID {
237 if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
238 return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
239 }
240 }
241 urlValues.Set("versionId", opts.Internal.SourceVersionID)
242 }
243
244 // Set ContentType header.
245 customHeader := opts.Header()
246
247 reqMetadata := requestMetadata{
248 bucketName: bucketName,
249 objectName: objectName,
250 queryValues: urlValues,
251 customHeader: customHeader,
252 }
253
254 // Execute POST on an objectName to initiate multipart upload.
255 resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
256 defer closeResponse(resp)
257 if err != nil {
258 return initiateMultipartUploadResult{}, err
259 }
260 if resp != nil {
261 if resp.StatusCode != http.StatusOK {
262 return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
263 }
264 }
265 // Decode xml for new multipart upload.
266 initiateMultipartUploadResult := initiateMultipartUploadResult{}
267 err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
268 if err != nil {
269 return initiateMultipartUploadResult, err
270 }
271 return initiateMultipartUploadResult, nil
272}
273
274type uploadPartParams struct {
275 bucketName string
276 objectName string
277 uploadID string
278 reader io.Reader
279 partNumber int
280 md5Base64 string
281 sha256Hex string
282 size int64
283 sse encrypt.ServerSide
284 streamSha256 bool
285 customHeader http.Header
286 trailer http.Header
287}
288
289// uploadPart - Uploads a part in a multipart upload.
290func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
291 // Input validation.
292 if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
293 return ObjectPart{}, err
294 }
295 if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
296 return ObjectPart{}, err
297 }
298 if p.size > maxPartSize {
299 return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
300 }
301 if p.size <= -1 {
302 return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
303 }
304 if p.partNumber <= 0 {
305 return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
306 }
307 if p.uploadID == "" {
308 return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
309 }
310
311 // Get resources properly escaped and lined up before using them in http request.
312 urlValues := make(url.Values)
313 // Set part number.
314 urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
315 // Set upload id.
316 urlValues.Set("uploadId", p.uploadID)
317
318 // Set encryption headers, if any.
319 if p.customHeader == nil {
320 p.customHeader = make(http.Header)
321 }
322 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
323 // Server-side encryption is supported by the S3 Multipart Upload actions.
324 // Unless you are using a customer-provided encryption key, you don't need
325 // to specify the encryption parameters in each UploadPart request.
326 if p.sse != nil && p.sse.Type() == encrypt.SSEC {
327 p.sse.Marshal(p.customHeader)
328 }
329
330 reqMetadata := requestMetadata{
331 bucketName: p.bucketName,
332 objectName: p.objectName,
333 queryValues: urlValues,
334 customHeader: p.customHeader,
335 contentBody: p.reader,
336 contentLength: p.size,
337 contentMD5Base64: p.md5Base64,
338 contentSHA256Hex: p.sha256Hex,
339 streamSha256: p.streamSha256,
340 trailer: p.trailer,
341 }
342
343 // Execute PUT on each part.
344 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
345 defer closeResponse(resp)
346 if err != nil {
347 return ObjectPart{}, err
348 }
349 if resp != nil {
350 if resp.StatusCode != http.StatusOK {
351 return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName)
352 }
353 }
354 // Once successfully uploaded, return completed part.
355 h := resp.Header
356 objPart := ObjectPart{
357 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
358 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
359 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
360 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
361 }
362 objPart.Size = p.size
363 objPart.PartNumber = p.partNumber
364	// Trim the surrounding double quotes off the ETag.
365 objPart.ETag = trimEtag(h.Get("ETag"))
366 return objPart, nil
367}
368
369// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
370func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
371 complete completeMultipartUpload, opts PutObjectOptions,
372) (UploadInfo, error) {
373 // Input validation.
374 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
375 return UploadInfo{}, err
376 }
377 if err := s3utils.CheckValidObjectName(objectName); err != nil {
378 return UploadInfo{}, err
379 }
380
381 // Initialize url queries.
382 urlValues := make(url.Values)
383 urlValues.Set("uploadId", uploadID)
384 // Marshal complete multipart body.
385 completeMultipartUploadBytes, err := xml.Marshal(complete)
386 if err != nil {
387 return UploadInfo{}, err
388 }
389
390 headers := opts.Header()
391 if s3utils.IsAmazonEndpoint(*c.endpointURL) {
392 headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
393 headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
394 headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
395 }
396
397	// Instantiate the complete multipart upload body buffer.
398 completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
399 reqMetadata := requestMetadata{
400 bucketName: bucketName,
401 objectName: objectName,
402 queryValues: urlValues,
403 contentBody: completeMultipartUploadBuffer,
404 contentLength: int64(len(completeMultipartUploadBytes)),
405 contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
406 customHeader: headers,
407 }
408
409 // Execute POST to complete multipart upload for an objectName.
410 resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
411 defer closeResponse(resp)
412 if err != nil {
413 return UploadInfo{}, err
414 }
415 if resp != nil {
416 if resp.StatusCode != http.StatusOK {
417 return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
418 }
419 }
420
421	// Read resp.Body into a []byte to parse for an Error response inside the body
422 var b []byte
423 b, err = io.ReadAll(resp.Body)
424 if err != nil {
425 return UploadInfo{}, err
426 }
427 // Decode completed multipart upload response on success.
428 completeMultipartUploadResult := completeMultipartUploadResult{}
429 err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
430 if err != nil {
431		// xml parsing failure due to the presence of an ill-formed xml fragment
432 return UploadInfo{}, err
433 } else if completeMultipartUploadResult.Bucket == "" {
434		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
435		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
436		// of its members.
437
438 // Decode completed multipart upload response on failure
439 completeMultipartUploadErr := ErrorResponse{}
440 err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
441 if err != nil {
442			// xml parsing failure due to the presence of an ill-formed xml fragment
443 return UploadInfo{}, err
444 }
445 return UploadInfo{}, completeMultipartUploadErr
446 }
447
448 // extract lifecycle expiry date and rule ID
449 expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
450
451 return UploadInfo{
452 Bucket: completeMultipartUploadResult.Bucket,
453 Key: completeMultipartUploadResult.Key,
454 ETag: trimEtag(completeMultipartUploadResult.ETag),
455 VersionID: resp.Header.Get(amzVersionID),
456 Location: completeMultipartUploadResult.Location,
457 Expiration: expTime,
458 ExpirationRuleID: ruleID,
459
460 ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
461 ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
462 ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
463 ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
464 }, nil
465}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
new file mode 100644
index 0000000..9182d4e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -0,0 +1,809 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "fmt"
25 "hash/crc32"
26 "io"
27 "net/http"
28 "net/url"
29 "sort"
30 "strings"
31 "sync"
32
33 "github.com/google/uuid"
34 "github.com/minio/minio-go/v7/pkg/s3utils"
35)
36
37// putObjectMultipartStream - upload a large object using
38// multipart upload and streaming signature for signing payload.
39// Comprehensive put object operation involving multipart uploads.
40//
41// The following code handles these types of readers:
42//
43// - *minio.Object
44// - Any reader which has a 'ReadAt()' method
45func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
46 reader io.Reader, size int64, opts PutObjectOptions,
47) (info UploadInfo, err error) {
48 if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
49 info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
50 } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
51 // If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
52 info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
53 } else {
54 info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
55 }
56 if err != nil {
57 errResp := ToErrorResponse(err)
58 // Verify if multipart functionality is unavailable; if so,
59 // fall back to a single PutObject operation.
60 if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
61 // Verify if size of reader is greater than '5GiB'.
62 if size > maxSinglePutObjectSize {
63 return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
64 }
65 // Fall back to uploading as single PutObject operation.
66 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
67 }
68 }
69 return info, err
70}
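// Illustrative sketch (not part of the library): which path a reader
// takes above, assuming SendContentMd5 and ConcurrentStreamParts are
// both unset.
//
//	f, _ := os.Open("large.bin") // *os.File implements io.ReaderAt
//	// -> putObjectMultipartStreamFromReadAt (parallel section readers)
//
//	var body io.Reader = resp.Body // plain stream, no ReadAt
//	// -> putObjectMultipartStreamOptionalChecksum (serial, buffered)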
71
72// uploadedPartRes - the response received from a part upload.
73type uploadedPartRes struct {
74 Error error // Any error encountered while uploading the part.
75 PartNum int // Number of the part uploaded.
76 Size int64 // Size of the part uploaded.
77 Part ObjectPart
78}
79
80type uploadPartReq struct {
81 PartNum int // Number of the part uploaded.
82 Part ObjectPart // Part info of the uploaded part.
83}
84
85// putObjectMultipartStreamFromReadAt - Uploads large objects using
86// multipart upload. Supports all readers which implement the
87// io.ReaderAt interface (ReadAt method).
88//
89// NOTE: This function is meant to be used for all readers which
90// implement io.ReaderAt, which allows resuming multipart uploads by
91// reading at an offset and thereby avoids re-reading data that was
92// already uploaded. Internally this function uses temporary files for
93// staging all the data; these temporary files are cleaned up
94// automatically when the caller, i.e. the http client, closes the
95// stream after uploading all the contents successfully.
96func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
97 reader io.ReaderAt, size int64, opts PutObjectOptions,
98) (info UploadInfo, err error) {
99 // Input validation.
100 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
101 return UploadInfo{}, err
102 }
103 if err = s3utils.CheckValidObjectName(objectName); err != nil {
104 return UploadInfo{}, err
105 }
106
107 // Calculate the optimal parts info for a given size.
108 totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
109 if err != nil {
110 return UploadInfo{}, err
111 }
112
113 withChecksum := c.trailingHeaderSupport
114 if withChecksum {
115 if opts.UserMetadata == nil {
116 opts.UserMetadata = make(map[string]string, 1)
117 }
118 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
119 }
120 // Initiate a new multipart upload.
121 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
122 if err != nil {
123 return UploadInfo{}, err
124 }
125 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
126
127 // Abort the multipart upload in progress if the
128 // function returns any error; since we do not resume,
129 // we should purge the parts which have been uploaded
130 // to relinquish storage space.
131 defer func() {
132 if err != nil {
133 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
134 }
135 }()
136
137 // Total data read and written to server; should be equal to 'size' at the end of the call.
138 var totalUploadedSize int64
139
140 // Complete multipart upload.
141 var complMultipartUpload completeMultipartUpload
142
143 // Declare a channel that sends the next part number to be uploaded.
144 uploadPartsCh := make(chan uploadPartReq)
145
146 // Declare a channel that sends back the response of a part upload.
147 uploadedPartsCh := make(chan uploadedPartRes)
148
149 // Used for readability, lastPartNumber is always totalPartsCount.
150 lastPartNumber := totalPartsCount
151
152 partitionCtx, partitionCancel := context.WithCancel(ctx)
153 defer partitionCancel()
154 // Send each part number to the channel to be processed.
155 go func() {
156 defer close(uploadPartsCh)
157
158 for p := 1; p <= totalPartsCount; p++ {
159 select {
160 case <-partitionCtx.Done():
161 return
162 case uploadPartsCh <- uploadPartReq{PartNum: p}:
163 }
164 }
165 }()
166
167 // Receive each part number from the channel, allowing opts.getNumThreads() parallel uploads.
168 for w := 1; w <= opts.getNumThreads(); w++ {
169 go func(partSize int64) {
170 for {
171 var uploadReq uploadPartReq
172 var ok bool
173 select {
174 case <-ctx.Done():
175 return
176 case uploadReq, ok = <-uploadPartsCh:
177 if !ok {
178 return
179 }
180 // Each worker will draw from the part channel and upload in parallel.
181 }
182
183 // Calculate the read offset and size for this part. For
184 // all but the last part, the offset is a simple multiple
185 // of partSize.
186 readOffset := int64(uploadReq.PartNum-1) * partSize
187
188 // As a special case if partNumber is lastPartNumber, we
189 // calculate the offset based on the last part size.
190 if uploadReq.PartNum == lastPartNumber {
191 readOffset = size - lastPartSize
192 partSize = lastPartSize
193 }
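    // Worked example (illustrative): size = 26 MiB with partSize =
    // 16 MiB gives totalPartsCount = 2 and lastPartSize = 10 MiB.
    // Part 1 reads [0, 16 MiB); part 2, the last part, reads at
    // offset 26-10 = 16 MiB for the remaining 10 MiB.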
194
195 sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
196 trailer := make(http.Header, 1)
197 if withChecksum {
198 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
199 trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
200 sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
201 trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
202 })
203 }
204
205 // Proceed to upload the part.
206 p := uploadPartParams{
207 bucketName: bucketName,
208 objectName: objectName,
209 uploadID: uploadID,
210 reader: sectionReader,
211 partNumber: uploadReq.PartNum,
212 size: partSize,
213 sse: opts.ServerSideEncryption,
214 streamSha256: !opts.DisableContentSha256,
215 sha256Hex: "",
216 trailer: trailer,
217 }
218 objPart, err := c.uploadPart(ctx, p)
219 if err != nil {
220 uploadedPartsCh <- uploadedPartRes{
221 Error: err,
222 }
223 // Exit the goroutine.
224 return
225 }
226
227 // Save successfully uploaded part metadata.
228 uploadReq.Part = objPart
229
230 // Send successful part info through the channel.
231 uploadedPartsCh <- uploadedPartRes{
232 Size: objPart.Size,
233 PartNum: uploadReq.PartNum,
234 Part: uploadReq.Part,
235 }
236 }
237 }(partSize)
238 }
239
240 // Gather the responses as they occur and update any
241 // progress bar.
242 for u := 1; u <= totalPartsCount; u++ {
243 select {
244 case <-ctx.Done():
245 return UploadInfo{}, ctx.Err()
246 case uploadRes := <-uploadedPartsCh:
247 if uploadRes.Error != nil {
248 return UploadInfo{}, uploadRes.Error
249 }
250
251 // Update the totalUploadedSize.
252 totalUploadedSize += uploadRes.Size
253 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
254 ETag: uploadRes.Part.ETag,
255 PartNumber: uploadRes.Part.PartNumber,
256 ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
257 ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
258 ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
259 ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
260 })
261 }
262 }
263
264 // Verify if we uploaded all the data.
265 if totalUploadedSize != size {
266 return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
267 }
268
269 // Sort all completed parts.
270 sort.Sort(completedParts(complMultipartUpload.Parts))
271
272 opts = PutObjectOptions{
273 ServerSideEncryption: opts.ServerSideEncryption,
274 }
275 if withChecksum {
276 // Add hash of hashes.
277 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
278 for _, part := range complMultipartUpload.Parts {
279 cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
280 if err == nil {
281 crc.Write(cs)
282 }
283 }
284 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
285 }
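 // Note (assumption, mirroring S3 multipart semantics): this "hash of
 // hashes" is the composite checksum for the multipart upload, a CRC32C
 // computed over the concatenation of each part's raw CRC32C bytes,
 // which the server can verify against the assembled object.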
286
287 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
288 if err != nil {
289 return UploadInfo{}, err
290 }
291
292 uploadInfo.Size = totalUploadedSize
293 return uploadInfo, nil
294}
295
296func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
297 reader io.Reader, size int64, opts PutObjectOptions,
298) (info UploadInfo, err error) {
299 // Input validation.
300 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
301 return UploadInfo{}, err
302 }
303 if err = s3utils.CheckValidObjectName(objectName); err != nil {
304 return UploadInfo{}, err
305 }
306
307 if !opts.SendContentMd5 {
308 if opts.UserMetadata == nil {
309 opts.UserMetadata = make(map[string]string, 1)
310 }
311 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
312 }
313
314 // Calculate the optimal parts info for a given size.
315 totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
316 if err != nil {
317 return UploadInfo{}, err
318 }
319 // Initiates a new multipart request
320 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
321 if err != nil {
322 return UploadInfo{}, err
323 }
324 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
325
326 // Abort the multipart upload if the function returns
327 // any error; since we do not resume, we should purge
328 // the parts which have been uploaded to relinquish
329 // storage space.
330 defer func() {
331 if err != nil {
332 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
333 }
334 }()
335
336 // Create checksums
337 // CRC32C is ~50% faster on AMD64 @ 30GB/s
338 var crcBytes []byte
339 customHeader := make(http.Header)
340 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
341 md5Hash := c.md5Hasher()
342 defer md5Hash.Close()
343
344 // Total data read and written to server; should be equal to 'size' at the end of the call.
345 var totalUploadedSize int64
346
347 // Initialize parts uploaded map.
348 partsInfo := make(map[int]ObjectPart)
349
350 // Create a buffer.
351 buf := make([]byte, partSize)
352
353 // Avoid declaring variables in the for loop
354 var md5Base64 string
355
356 // Part number always starts with '1'.
357 var partNumber int
358 for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
359
360 // Proceed to upload the part.
361 if partNumber == totalPartsCount {
362 partSize = lastPartSize
363 }
364
365 length, rerr := readFull(reader, buf)
366 if rerr == io.EOF && partNumber > 1 {
367 break
368 }
369
370 if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
371 return UploadInfo{}, rerr
372 }
373
374 // Calculate md5sum.
375 if opts.SendContentMd5 {
376 md5Hash.Reset()
377 md5Hash.Write(buf[:length])
378 md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
379 } else {
380 // Add CRC32C instead.
381 crc.Reset()
382 crc.Write(buf[:length])
383 cSum := crc.Sum(nil)
384 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
385 crcBytes = append(crcBytes, cSum...)
386 }
387
388 // Update progress reader appropriately to the latest offset
389 // as we read from the source.
390 hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress)
391 p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
392 objPart, uerr := c.uploadPart(ctx, p)
393 if uerr != nil {
394 return UploadInfo{}, uerr
395 }
396
397 // Save successfully uploaded part metadata.
398 partsInfo[partNumber] = objPart
399
400 // Save successfully uploaded size.
401 totalUploadedSize += partSize
402 }
403
404 // Verify if we uploaded all the data.
405 if size > 0 {
406 if totalUploadedSize != size {
407 return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
408 }
409 }
410
411 // Complete multipart upload.
412 var complMultipartUpload completeMultipartUpload
413
414 // Loop over total uploaded parts to save them in
415 // Parts array before completing the multipart request.
416 for i := 1; i < partNumber; i++ {
417 part, ok := partsInfo[i]
418 if !ok {
419 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
420 }
421 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
422 ETag: part.ETag,
423 PartNumber: part.PartNumber,
424 ChecksumCRC32: part.ChecksumCRC32,
425 ChecksumCRC32C: part.ChecksumCRC32C,
426 ChecksumSHA1: part.ChecksumSHA1,
427 ChecksumSHA256: part.ChecksumSHA256,
428 })
429 }
430
431 // Sort all completed parts.
432 sort.Sort(completedParts(complMultipartUpload.Parts))
433
434 opts = PutObjectOptions{
435 ServerSideEncryption: opts.ServerSideEncryption,
436 }
437 if len(crcBytes) > 0 {
438 // Add hash of hashes.
439 crc.Reset()
440 crc.Write(crcBytes)
441 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
442 }
443 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
444 if err != nil {
445 return UploadInfo{}, err
446 }
447
448 uploadInfo.Size = totalUploadedSize
449 return uploadInfo, nil
450}
451
452// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
453// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
454func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
455 reader io.Reader, opts PutObjectOptions,
456) (info UploadInfo, err error) {
457 // Input validation.
458 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
459 return UploadInfo{}, err
460 }
461
462 if err = s3utils.CheckValidObjectName(objectName); err != nil {
463 return UploadInfo{}, err
464 }
465
466 if !opts.SendContentMd5 {
467 if opts.UserMetadata == nil {
468 opts.UserMetadata = make(map[string]string, 1)
469 }
470 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
471 }
472
473 // Cancel all when an error occurs.
474 ctx, cancel := context.WithCancel(ctx)
475 defer cancel()
476
477 // Calculate the optimal parts info for a given size.
478 totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
479 if err != nil {
480 return UploadInfo{}, err
481 }
482
483 // Initiates a new multipart request
484 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
485 if err != nil {
486 return UploadInfo{}, err
487 }
488 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
489
490 // Abort the multipart upload if the function returns
491 // any error; since we do not resume, we should purge
492 // the parts which have been uploaded to relinquish
493 // storage space.
494 defer func() {
495 if err != nil {
496 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
497 }
498 }()
499
500 // Create checksums
501 // CRC32C is ~50% faster on AMD64 @ 30GB/s
502 var crcBytes []byte
503 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
504
505 // Total data read and written to server; should be equal to 'size' at the end of the call.
506 var totalUploadedSize int64
507
508 // Initialize parts uploaded map.
509 partsInfo := make(map[int]ObjectPart)
510
511 // Create a buffer.
512 nBuffers := int64(opts.NumThreads)
513 bufs := make(chan []byte, nBuffers)
514 all := make([]byte, nBuffers*partSize)
515 for i := int64(0); i < nBuffers; i++ {
516 bufs <- all[i*partSize : i*partSize+partSize]
517 }
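 // The bufs channel doubles as a fixed-size buffer pool: the filler
 // below blocks on <-bufs until a worker returns its slice, bounding
 // peak buffer memory to roughly nBuffers*partSize bytes.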
518
519 var wg sync.WaitGroup
520 var mu sync.Mutex
521 errCh := make(chan error, opts.NumThreads)
522
523 reader = newHook(reader, opts.Progress)
524
525 // Part number always starts with '1'.
526 var partNumber int
527 for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
528 // Proceed to upload the part.
529 var buf []byte
530 select {
531 case buf = <-bufs:
532 case err = <-errCh:
533 cancel()
534 wg.Wait()
535 return UploadInfo{}, err
536 }
537
538 if int64(len(buf)) != partSize {
539 return UploadInfo{}, fmt.Errorf("read buffer size %d does not match expected partSize %d", len(buf), partSize)
540 }
541
542 length, rerr := readFull(reader, buf)
543 if rerr == io.EOF && partNumber > 1 {
544 // Done
545 break
546 }
547
548 if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
549 cancel()
550 wg.Wait()
551 return UploadInfo{}, rerr
552 }
553
554 // Calculate md5sum.
555 customHeader := make(http.Header)
556 if !opts.SendContentMd5 {
557 // Add CRC32C instead.
558 crc.Reset()
559 crc.Write(buf[:length])
560 cSum := crc.Sum(nil)
561 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
562 crcBytes = append(crcBytes, cSum...)
563 }
564
565 wg.Add(1)
566 go func(partNumber int) {
567 // Avoid declaring variables in the for loop
568 var md5Base64 string
569
570 if opts.SendContentMd5 {
571 md5Hash := c.md5Hasher()
572 md5Hash.Write(buf[:length])
573 md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
574 md5Hash.Close()
575 }
576
577 defer wg.Done()
578 p := uploadPartParams{
579 bucketName: bucketName,
580 objectName: objectName,
581 uploadID: uploadID,
582 reader: bytes.NewReader(buf[:length]),
583 partNumber: partNumber,
584 md5Base64: md5Base64,
585 size: int64(length),
586 sse: opts.ServerSideEncryption,
587 streamSha256: !opts.DisableContentSha256,
588 customHeader: customHeader,
589 }
590 objPart, uerr := c.uploadPart(ctx, p)
591 if uerr != nil {
592 errCh <- uerr
593 return
594 }
595
596 // Save successfully uploaded part metadata.
597 mu.Lock()
598 partsInfo[partNumber] = objPart
599 mu.Unlock()
600
601 // Send buffer back so it can be reused.
602 bufs <- buf
603 }(partNumber)
604
605 // Save successfully uploaded size.
606 totalUploadedSize += int64(length)
607 }
608 wg.Wait()
609
610 // Collect any error
611 select {
612 case err = <-errCh:
613 return UploadInfo{}, err
614 default:
615 }
616
617 // Complete multipart upload.
618 var complMultipartUpload completeMultipartUpload
619
620 // Loop over total uploaded parts to save them in
621 // Parts array before completing the multipart request.
622 for i := 1; i < partNumber; i++ {
623 part, ok := partsInfo[i]
624 if !ok {
625 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
626 }
627 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
628 ETag: part.ETag,
629 PartNumber: part.PartNumber,
630 ChecksumCRC32: part.ChecksumCRC32,
631 ChecksumCRC32C: part.ChecksumCRC32C,
632 ChecksumSHA1: part.ChecksumSHA1,
633 ChecksumSHA256: part.ChecksumSHA256,
634 })
635 }
636
637 // Sort all completed parts.
638 sort.Sort(completedParts(complMultipartUpload.Parts))
639
640 opts = PutObjectOptions{}
641 if len(crcBytes) > 0 {
642 // Add hash of hashes.
643 crc.Reset()
644 crc.Write(crcBytes)
645 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
646 }
647 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
648 if err != nil {
649 return UploadInfo{}, err
650 }
651
652 uploadInfo.Size = totalUploadedSize
653 return uploadInfo, nil
654}
655
656// putObject - uploads an object with a single PUT operation. Also used
657// for Google Cloud Storage, since Google's multipart API is not S3 compatible.
658func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
659 // Input validation.
660 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
661 return UploadInfo{}, err
662 }
663 if err := s3utils.CheckValidObjectName(objectName); err != nil {
664 return UploadInfo{}, err
665 }
666
667 // Size -1 is only supported on Google Cloud Storage; we error
668 // out in all other situations.
669 if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
670 return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
671 }
672
673 if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
674 return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
675 }
676
677 var readSeeker io.Seeker
678 if size > 0 {
679 if isReadAt(reader) && !isObject(reader) {
680 seeker, ok := reader.(io.Seeker)
681 if ok {
682 offset, err := seeker.Seek(0, io.SeekCurrent)
683 if err != nil {
684 return UploadInfo{}, errInvalidArgument(err.Error())
685 }
686 reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
687 readSeeker = reader.(io.Seeker)
688 }
689 }
690 }
691
692 var md5Base64 string
693 if opts.SendContentMd5 {
694 // Calculate md5sum.
695 hash := c.md5Hasher()
696
697 if readSeeker != nil {
698 if _, err := io.Copy(hash, reader); err != nil {
699 return UploadInfo{}, err
700 }
701 // Seek back to beginning of io.NewSectionReader's offset.
702 _, err = readSeeker.Seek(0, io.SeekStart)
703 if err != nil {
704 return UploadInfo{}, errInvalidArgument(err.Error())
705 }
706 } else {
707 // Create a buffer.
708 buf := make([]byte, size)
709
710 length, err := readFull(reader, buf)
711 if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
712 return UploadInfo{}, err
713 }
714
715 hash.Write(buf[:length])
716 reader = bytes.NewReader(buf[:length])
717 }
718
719 md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
720 hash.Close()
721 }
722
723 // Update progress reader appropriately to the latest offset as we
724 // read from the source.
725 progressReader := newHook(reader, opts.Progress)
726
727 // This function does not calculate sha256 and md5sum for payload.
728 // Execute put object.
729 return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts)
730}
731
732// putObjectDo - executes the put object http operation.
733// NOTE: You must have WRITE permissions on a bucket to add an object to it.
734func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
735 // Input validation.
736 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
737 return UploadInfo{}, err
738 }
739 if err := s3utils.CheckValidObjectName(objectName); err != nil {
740 return UploadInfo{}, err
741 }
742 // Set headers.
743 customHeader := opts.Header()
744
745 // Add CRC when the client supports it, MD5 is not set, the endpoint is not Google, and we don't add SHA256 to chunks.
746 addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
747
748 if addCrc {
749 // If user has added checksums, don't add them ourselves.
750 for k := range opts.UserMetadata {
751 if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
752 addCrc = false
753 }
754 }
755 }
756 // Populate request metadata.
757 reqMetadata := requestMetadata{
758 bucketName: bucketName,
759 objectName: objectName,
760 customHeader: customHeader,
761 contentBody: reader,
762 contentLength: size,
763 contentMD5Base64: md5Base64,
764 contentSHA256Hex: sha256Hex,
765 streamSha256: !opts.DisableContentSha256,
766 addCrc: addCrc,
767 }
768 if opts.Internal.SourceVersionID != "" {
769 if opts.Internal.SourceVersionID != nullVersionID {
770 if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
771 return UploadInfo{}, errInvalidArgument(err.Error())
772 }
773 }
774 urlValues := make(url.Values)
775 urlValues.Set("versionId", opts.Internal.SourceVersionID)
776 reqMetadata.queryValues = urlValues
777 }
778
779 // Execute PUT an objectName.
780 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
781 defer closeResponse(resp)
782 if err != nil {
783 return UploadInfo{}, err
784 }
785 if resp != nil {
786 if resp.StatusCode != http.StatusOK {
787 return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
788 }
789 }
790
791 // extract lifecycle expiry date and rule ID
792 expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
793 h := resp.Header
794 return UploadInfo{
795 Bucket: bucketName,
796 Key: objectName,
797 ETag: trimEtag(h.Get("ETag")),
798 VersionID: h.Get(amzVersionID),
799 Size: size,
800 Expiration: expTime,
801 ExpirationRuleID: ruleID,
802
803 // Checksum values
804 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
805 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
806 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
807 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
808 }, nil
809}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
new file mode 100644
index 0000000..bbd8924
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -0,0 +1,473 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "errors"
25 "fmt"
26 "hash/crc32"
27 "io"
28 "net/http"
29 "sort"
30 "time"
31
32 "github.com/minio/minio-go/v7/pkg/encrypt"
33 "github.com/minio/minio-go/v7/pkg/s3utils"
34 "golang.org/x/net/http/httpguts"
35)
36
37// ReplicationStatus represents replication status of object
38type ReplicationStatus string
39
40const (
41 // ReplicationStatusPending indicates replication is pending
42 ReplicationStatusPending ReplicationStatus = "PENDING"
43 // ReplicationStatusComplete indicates replication completed ok
44 ReplicationStatusComplete ReplicationStatus = "COMPLETED"
45 // ReplicationStatusFailed indicates replication failed
46 ReplicationStatusFailed ReplicationStatus = "FAILED"
47 // ReplicationStatusReplica indicates object is a replica of a source
48 ReplicationStatusReplica ReplicationStatus = "REPLICA"
49)
50
51// Empty returns true if no replication status is set.
52func (r ReplicationStatus) Empty() bool {
53 return r == ""
54}
55
56// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
57// implementation on MinIO server
58type AdvancedPutOptions struct {
59 SourceVersionID string
60 SourceETag string
61 ReplicationStatus ReplicationStatus
62 SourceMTime time.Time
63 ReplicationRequest bool
64 RetentionTimestamp time.Time
65 TaggingTimestamp time.Time
66 LegalholdTimestamp time.Time
67 ReplicationValidityCheck bool
68}
69
70// PutObjectOptions represents options specified by user for PutObject call
71type PutObjectOptions struct {
72 UserMetadata map[string]string
73 UserTags map[string]string
74 Progress io.Reader
75 ContentType string
76 ContentEncoding string
77 ContentDisposition string
78 ContentLanguage string
79 CacheControl string
80 Expires time.Time
81 Mode RetentionMode
82 RetainUntilDate time.Time
83 ServerSideEncryption encrypt.ServerSide
84 NumThreads uint
85 StorageClass string
86 WebsiteRedirectLocation string
87 PartSize uint64
88 LegalHold LegalHoldStatus
89 SendContentMd5 bool
90 DisableContentSha256 bool
91 DisableMultipart bool
92
93 // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
94 // fill them serially and upload them in parallel.
95 // This can be used for faster uploads on non-seekable or slow-to-seek input.
96 ConcurrentStreamParts bool
97 Internal AdvancedPutOptions
98
99 customHeaders http.Header
100}
101
102// SetMatchETag makes the PUT succeed only if the object's current
103// etag matches the given etag; this is a MinIO-specific extension to
104// support optimistic locking semantics.
105func (opts *PutObjectOptions) SetMatchETag(etag string) {
106 if opts.customHeaders == nil {
107 opts.customHeaders = http.Header{}
108 }
109 opts.customHeaders.Set("If-Match", "\""+etag+"\"")
110}
111
112// SetMatchETagExcept makes the PUT succeed only if the object's current
113// etag does not match the given etag; this is a MinIO-specific extension
114// to support optimistic locking semantics.
115func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
116 if opts.customHeaders == nil {
117 opts.customHeaders = http.Header{}
118 }
119 opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
120}
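// Illustrative use of the conditional-PUT extensions above (hypothetical
// names; client, ctx, r and size are assumed to be in scope):
//
//	st, err := client.StatObject(ctx, "mybucket", "config.json", minio.StatObjectOptions{})
//	if err != nil { /* handle error */ }
//	var opts minio.PutObjectOptions
//	opts.SetMatchETag(st.ETag) // overwrite only if unchanged since StatObject
//	_, err = client.PutObject(ctx, "mybucket", "config.json", r, size, opts)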
121
122// getNumThreads - gets the number of threads to be used in the multipart
123// put object operation
124func (opts PutObjectOptions) getNumThreads() (numThreads int) {
125 if opts.NumThreads > 0 {
126 numThreads = int(opts.NumThreads)
127 } else {
128 numThreads = totalWorkers
129 }
130 return
131}
132
133// Header - constructs the headers from metadata entered by user in
134// PutObjectOptions struct
135func (opts PutObjectOptions) Header() (header http.Header) {
136 header = make(http.Header)
137
138 contentType := opts.ContentType
139 if contentType == "" {
140 contentType = "application/octet-stream"
141 }
142 header.Set("Content-Type", contentType)
143
144 if opts.ContentEncoding != "" {
145 header.Set("Content-Encoding", opts.ContentEncoding)
146 }
147 if opts.ContentDisposition != "" {
148 header.Set("Content-Disposition", opts.ContentDisposition)
149 }
150 if opts.ContentLanguage != "" {
151 header.Set("Content-Language", opts.ContentLanguage)
152 }
153 if opts.CacheControl != "" {
154 header.Set("Cache-Control", opts.CacheControl)
155 }
156
157 if !opts.Expires.IsZero() {
158 header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
159 }
160
161 if opts.Mode != "" {
162 header.Set(amzLockMode, opts.Mode.String())
163 }
164
165 if !opts.RetainUntilDate.IsZero() {
166 header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
167 }
168
169 if opts.LegalHold != "" {
170 header.Set(amzLegalHoldHeader, opts.LegalHold.String())
171 }
172
173 if opts.ServerSideEncryption != nil {
174 opts.ServerSideEncryption.Marshal(header)
175 }
176
177 if opts.StorageClass != "" {
178 header.Set(amzStorageClass, opts.StorageClass)
179 }
180
181 if opts.WebsiteRedirectLocation != "" {
182 header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
183 }
184
185 if !opts.Internal.ReplicationStatus.Empty() {
186 header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
187 }
188 if !opts.Internal.SourceMTime.IsZero() {
189 header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
190 }
191 if opts.Internal.SourceETag != "" {
192 header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
193 }
194 if opts.Internal.ReplicationRequest {
195 header.Set(minIOBucketReplicationRequest, "true")
196 }
197 if opts.Internal.ReplicationValidityCheck {
198 header.Set(minIOBucketReplicationCheck, "true")
199 }
200 if !opts.Internal.LegalholdTimestamp.IsZero() {
201 header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
202 }
203 if !opts.Internal.RetentionTimestamp.IsZero() {
204 header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
205 }
206 if !opts.Internal.TaggingTimestamp.IsZero() {
207 header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
208 }
209
210 if len(opts.UserTags) != 0 {
211 header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
212 }
213
214 for k, v := range opts.UserMetadata {
215 if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
216 header.Set(k, v)
217 } else {
218 header.Set("x-amz-meta-"+k, v)
219 }
220 }
221
222 // set any other additional custom headers.
223 for k, v := range opts.customHeaders {
224 header[k] = v
225 }
226
227 return
228}
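// Illustrative sketch of Header() output (names and values are
// assumptions, not from the library):
//
//	opts := minio.PutObjectOptions{
//		ContentType:  "application/json",
//		UserMetadata: map[string]string{"review": "pending"},
//	}
//	h := opts.Header()
//	// h.Get("Content-Type")      == "application/json"
//	// h.Get("X-Amz-Meta-Review") == "pending"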
229
230// validate() checks if the UserMetadata map has standard headers or invalid values and returns an error if so.
231func (opts PutObjectOptions) validate() (err error) {
232 for k, v := range opts.UserMetadata {
233 if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
234 return errInvalidArgument(k + " unsupported user defined metadata name")
235 }
236 if !httpguts.ValidHeaderFieldValue(v) {
237 return errInvalidArgument(v + " unsupported user defined metadata value")
238 }
239 }
240 if opts.Mode != "" && !opts.Mode.IsValid() {
241 return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
242 }
243 if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
244 return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
245 }
246 return nil
247}
248
249// completedParts is a collection of parts sortable by their part numbers.
250// used for sorting the uploaded parts before completing the multipart request.
251type completedParts []CompletePart
252
253func (a completedParts) Len() int { return len(a) }
254func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
255func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
256
257// PutObject creates an object in a bucket.
258//
259// You must have WRITE permissions on a bucket to create an object.
260//
261// - For size smaller than 16MiB PutObject automatically does a
262// single atomic PUT operation.
263//
264// - For size larger than 16MiB PutObject automatically does a
265// multipart upload operation.
266//
267// - For size input as -1 PutObject does a multipart Put operation
268// until input stream reaches EOF. Maximum object size that can
269// be uploaded through this operation will be 5TiB.
270//
271// WARNING: Passing '-1' allocates internal buffers that cannot be
272// reused. For best results with PutObject(), always pass the size.
273//
274// NOTE: Upon errors during upload multipart operation is entirely aborted.
275func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
276 opts PutObjectOptions,
277) (info UploadInfo, err error) {
278 if objectSize < 0 && opts.DisableMultipart {
279 return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
280 }
281
282 err = opts.validate()
283 if err != nil {
284 return UploadInfo{}, err
285 }
286
287 return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
288}
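// Minimal usage sketch for PutObject (hypothetical endpoint and names;
// client and ctx assumed in scope):
//
//	f, err := os.Open("backup.tar")
//	if err != nil { /* handle error */ }
//	defer f.Close()
//	st, _ := f.Stat()
//	info, err := client.PutObject(ctx, "mybucket", "backup.tar", f, st.Size(),
//		minio.PutObjectOptions{ContentType: "application/x-tar"})
//	// info.ETag, info.Size and info.VersionID describe the stored object.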
289
290func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
291 // Check for largest object size allowed.
292 if size > int64(maxMultipartPutObjectSize) {
293 return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
294 }
295
296 // NOTE: Streaming signature is not supported by GCS.
297 if s3utils.IsGoogleEndpoint(*c.endpointURL) {
298 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
299 }
300
301 partSize := opts.PartSize
302 if opts.PartSize == 0 {
303 partSize = minPartSize
304 }
305
306 if c.overrideSignerType.IsV2() {
307 if size >= 0 && size < int64(partSize) || opts.DisableMultipart {
308 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
309 }
310 return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
311 }
312
313 if size < 0 {
314 if opts.DisableMultipart {
315 return UploadInfo{}, errors.New("no length provided and multipart disabled")
316 }
317 if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
318 return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
319 }
320 return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
321 }
322
323 if size < int64(partSize) || opts.DisableMultipart {
324 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
325 }
326
327 return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
328}
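// Illustrative decision table for putObjectCommon above, assuming
// default V4 signing (thresholds depend on minPartSize/opts.PartSize):
//
//	Google Cloud Storage endpoint        -> putObject (no streaming signature)
//	size < 0 && ConcurrentStreamParts    -> putObjectMultipartStreamParallel
//	size < 0 otherwise                   -> putObjectMultipartStreamNoLength
//	size < partSize || DisableMultipart  -> putObject (single PUT)
//	otherwise                            -> putObjectMultipartStream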
329
330func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
331 // Input validation.
332 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
333 return UploadInfo{}, err
334 }
335 if err = s3utils.CheckValidObjectName(objectName); err != nil {
336 return UploadInfo{}, err
337 }
338
339 // Total data read and written to server; should be equal to
340 // 'size' at the end of the call.
341 var totalUploadedSize int64
342
343 // Complete multipart upload.
344 var complMultipartUpload completeMultipartUpload
345
346 // Calculate the optimal parts info for a given size.
347 totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
348 if err != nil {
349 return UploadInfo{}, err
350 }
351
352 if !opts.SendContentMd5 {
353 if opts.UserMetadata == nil {
354 opts.UserMetadata = make(map[string]string, 1)
355 }
356 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
357 }
358
359 // Initiate a new multipart upload.
360 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
361 if err != nil {
362 return UploadInfo{}, err
363 }
364 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
365
366 defer func() {
367 if err != nil {
368 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
369 }
370 }()
371
372 // Part number always starts with '1'.
373 partNumber := 1
374
375 // Initialize parts uploaded map.
376 partsInfo := make(map[int]ObjectPart)
377
378 // Create a buffer.
379 buf := make([]byte, partSize)
380
381 // Create checksums
382 // CRC32C is ~50% faster on AMD64 @ 30GB/s
383 var crcBytes []byte
384 customHeader := make(http.Header)
385 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
386
387 for partNumber <= totalPartsCount {
388 length, rerr := readFull(reader, buf)
389 if rerr == io.EOF && partNumber > 1 {
390 break
391 }
392
393 if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
394 return UploadInfo{}, rerr
395 }
396
397 var md5Base64 string
398 if opts.SendContentMd5 {
399 // Calculate md5sum.
400 hash := c.md5Hasher()
401 hash.Write(buf[:length])
402 md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
403 hash.Close()
404 } else {
405 crc.Reset()
406 crc.Write(buf[:length])
407 cSum := crc.Sum(nil)
408 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
409 crcBytes = append(crcBytes, cSum...)
410 }
411
412 // Update progress reader appropriately to the latest offset
413 // as we read from the source.
414 rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
415
416 // Proceed to upload the part.
417 p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
418 objPart, uerr := c.uploadPart(ctx, p)
419 if uerr != nil {
420 return UploadInfo{}, uerr
421 }
422
423 // Save successfully uploaded part metadata.
424 partsInfo[partNumber] = objPart
425
426 // Save successfully uploaded size.
427 totalUploadedSize += int64(length)
428
429 // Increment part number.
430 partNumber++
431
432 // For unknown size, we break away on read EOF.
433 // We do not have to upload till totalPartsCount.
434 if rerr == io.EOF {
435 break
436 }
437 }
438
439 // Loop over total uploaded parts to save them in
440 // Parts array before completing the multipart request.
441 for i := 1; i < partNumber; i++ {
442 part, ok := partsInfo[i]
443 if !ok {
444 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
445 }
446 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
447 ETag: part.ETag,
448 PartNumber: part.PartNumber,
449 ChecksumCRC32: part.ChecksumCRC32,
450 ChecksumCRC32C: part.ChecksumCRC32C,
451 ChecksumSHA1: part.ChecksumSHA1,
452 ChecksumSHA256: part.ChecksumSHA256,
453 })
454 }
455
456 // Sort all completed parts.
457 sort.Sort(completedParts(complMultipartUpload.Parts))
458
459 opts = PutObjectOptions{}
460 if len(crcBytes) > 0 {
461 // Add hash of hashes.
462 crc.Reset()
463 crc.Write(crcBytes)
464 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
465 }
466 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
467 if err != nil {
468 return UploadInfo{}, err
469 }
470
471 uploadInfo.Size = totalUploadedSize
472 return uploadInfo, nil
473}
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
new file mode 100644
index 0000000..eb4da41
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -0,0 +1,246 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "archive/tar"
22 "bufio"
23 "bytes"
24 "context"
25 "fmt"
26 "io"
27 "net/http"
28 "os"
29 "strings"
30 "sync"
31 "time"
32
33 "github.com/klauspost/compress/s2"
34)
35
36// SnowballOptions contains options for PutObjectsSnowball calls.
37type SnowballOptions struct {
38 // Opts is options applied to all objects.
39 Opts PutObjectOptions
40
41 // Processing options:
42
43 // InMemory specifies that all objects should be collected in memory
44 // before they are uploaded.
45 // If false a temporary file will be created.
46 InMemory bool
47
48 // Compress enables content compression before upload.
49 // Compression will typically reduce memory and network usage.
50 // Compression can safely be enabled with MinIO hosts.
51 Compress bool
52
53 // SkipErrs, if enabled, skips any errors encountered while reading
54 // object content when creating the snowball archive.
55 SkipErrs bool
56}
57
58// SnowballObject contains information about a single object to be added to the snowball.
59type SnowballObject struct {
60 // Key is the destination key, including prefix.
61 Key string
62
63 // Size is the content size of this object.
64 Size int64
65
66 // ModTime to apply to the object.
67 // If ModTime is the zero value, the current time will be used.
68 ModTime time.Time
69
70 // Content of the object.
71 // Exactly 'Size' number of bytes must be provided.
72 Content io.Reader
73
74 // VersionID of the object; if empty, a new versionID will be generated
75 VersionID string
76
77 // Headers contains more options for this object upload, the same as you
78 // would include in a regular PutObject operation, such as user metadata
79 // and content-disposition, expires, etc.
80 Headers http.Header
81
82 // Close will be called when an object has finished processing.
83 // Note that if PutObjectsSnowball returns because of an error,
84 // objects not consumed from the input will NOT have been closed.
85 // Leave as nil for no callback.
86 Close func()
87}
88
89type nopReadSeekCloser struct {
90 io.ReadSeeker
91}
92
93func (n nopReadSeekCloser) Close() error {
94 return nil
95}
96
97// This is available as io.ReadSeekCloser from go1.16
98type readSeekCloser interface {
99 io.Reader
100 io.Closer
101 io.Seeker
102}
103
104// PutObjectsSnowball will put multiple objects with a single put call.
105// A (compressed) TAR file will be created which will contain multiple objects.
106// The key for each object will be used for the destination in the specified bucket.
107// Total size should be < 5TB.
108// This function blocks until 'objs' is closed and the content has been uploaded.
109func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
110 err = opts.Opts.validate()
111 if err != nil {
112 return err
113 }
114 var tmpWriter io.Writer
115 var getTmpReader func() (rc readSeekCloser, sz int64, err error)
116 if opts.InMemory {
117 b := bytes.NewBuffer(nil)
118 tmpWriter = b
119 getTmpReader = func() (readSeekCloser, int64, error) {
120 return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
121 }
122 } else {
123 f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
124 if err != nil {
125 return err
126 }
127 name := f.Name()
128 tmpWriter = f
129 var once sync.Once
130 defer once.Do(func() {
131 f.Close()
132 })
133 defer os.Remove(name)
134 getTmpReader = func() (readSeekCloser, int64, error) {
135 once.Do(func() {
136 f.Close()
137 })
138 f, err := os.Open(name)
139 if err != nil {
140 return nil, 0, err
141 }
142 st, err := f.Stat()
143 if err != nil {
144 return nil, 0, err
145 }
146 return f, st.Size(), nil
147 }
148 }
149 flush := func() error { return nil }
150 if !opts.Compress {
151 if !opts.InMemory {
152 // Insert buffer for writes.
153 buf := bufio.NewWriterSize(tmpWriter, 1<<20)
154 flush = buf.Flush
155 tmpWriter = buf
156 }
157 } else {
158 s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression())
159 flush = s2c.Close
160 defer s2c.Close()
161 tmpWriter = s2c
162 }
163 t := tar.NewWriter(tmpWriter)
164
165objectLoop:
166 for {
167 select {
168 case <-ctx.Done():
169 return ctx.Err()
170 case obj, ok := <-objs:
171 if !ok {
172 break objectLoop
173 }
174
175 closeObj := func() {}
176 if obj.Close != nil {
177 closeObj = obj.Close
178 }
179
180 // Trim accidental slash prefix.
181 obj.Key = strings.TrimPrefix(obj.Key, "/")
182 header := tar.Header{
183 Typeflag: tar.TypeReg,
184 Name: obj.Key,
185 Size: obj.Size,
186 ModTime: obj.ModTime,
187 Format: tar.FormatPAX,
188 }
189 if header.ModTime.IsZero() {
190 header.ModTime = time.Now().UTC()
191 }
192
193 header.PAXRecords = make(map[string]string)
194 if obj.VersionID != "" {
195 header.PAXRecords["minio.versionId"] = obj.VersionID
196 }
197 for k, vals := range obj.Headers {
198 header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",")
199 }
200
201 if err := t.WriteHeader(&header); err != nil {
202 closeObj()
203 return err
204 }
205 n, err := io.Copy(t, obj.Content)
206 if err != nil {
207 closeObj()
208 if opts.SkipErrs {
209 continue
210 }
211 return err
212 }
213 if n != obj.Size {
214 closeObj()
215 if opts.SkipErrs {
216 continue
217 }
218 return io.ErrUnexpectedEOF
219 }
220 closeObj()
221 }
222 }
223 // Flush tar
224 err = t.Flush()
225 if err != nil {
226 return err
227 }
228 // Flush compression
229 err = flush()
230 if err != nil {
231 return err
232 }
233 if opts.Opts.UserMetadata == nil {
234 opts.Opts.UserMetadata = map[string]string{}
235 }
236 opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true"
237 opts.Opts.DisableMultipart = true
238 rc, sz, err := getTmpReader()
239 if err != nil {
240 return err
241 }
242 defer rc.Close()
243 rand := c.random.Uint64()
244 _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts)
245 return err
246}
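// Minimal usage sketch for PutObjectsSnowball (hypothetical names;
// client and ctx assumed in scope):
//
//	objs := make(chan minio.SnowballObject, 1)
//	go func() {
//		defer close(objs)
//		data := []byte("hello")
//		objs <- minio.SnowballObject{
//			Key:     "greetings/hello.txt",
//			Size:    int64(len(data)),
//			Content: bytes.NewReader(data),
//		}
//	}()
//	err := client.PutObjectsSnowball(ctx, "mybucket",
//		minio.SnowballOptions{Compress: true}, objs)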
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
new file mode 100644
index 0000000..9c0ac44
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -0,0 +1,548 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "io"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
32//revive:disable
33
34// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
35type BucketOptions = RemoveBucketOptions
36
37//revive:enable
38
39// RemoveBucketOptions - special headers to purge buckets, only
40// useful when the endpoint is MinIO
41type RemoveBucketOptions struct {
42 ForceDelete bool
43}
44
45// RemoveBucketWithOptions deletes the named bucket.
46//
47// All objects (including all object versions and delete markers)
48// in the bucket will be deleted forcibly if bucket options set
49// ForceDelete to 'true'.
50func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error {
51 // Input validation.
52 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
53 return err
54 }
55
56 // Build headers.
57 headers := make(http.Header)
58 if opts.ForceDelete {
59 headers.Set(minIOForceDelete, "true")
60 }
61
62 // Execute DELETE on bucket.
63 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
64 bucketName: bucketName,
65 contentSHA256Hex: emptySHA256Hex,
66 customHeader: headers,
67 })
68 defer closeResponse(resp)
69 if err != nil {
70 return err
71 }
72 if resp != nil {
73 if resp.StatusCode != http.StatusNoContent {
74 return httpRespToErrorResponse(resp, bucketName, "")
75 }
76 }
77
78 // Remove the location from cache on a successful delete.
79 c.bucketLocCache.Delete(bucketName)
80 return nil
81}
82
83// RemoveBucket deletes the named bucket.
84//
85// All objects (including all object versions and delete markers)
86// in the bucket must be deleted before successfully attempting this request.
87func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
88 // Input validation.
89 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
90 return err
91 }
92 // Execute DELETE on bucket.
93 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
94 bucketName: bucketName,
95 contentSHA256Hex: emptySHA256Hex,
96 })
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 if resp != nil {
102 if resp.StatusCode != http.StatusNoContent {
103 return httpRespToErrorResponse(resp, bucketName, "")
104 }
105 }
106
107 // Remove the location from cache on a successful delete.
108 c.bucketLocCache.Delete(bucketName)
109
110 return nil
111}
112
113// AdvancedRemoveOptions intended for internal use by replication
114type AdvancedRemoveOptions struct {
115 ReplicationDeleteMarker bool
116 ReplicationStatus ReplicationStatus
117 ReplicationMTime time.Time
118 ReplicationRequest bool
119 ReplicationValidityCheck bool // check permissions
120}
121
122// RemoveObjectOptions represents options specified by user for RemoveObject call
123type RemoveObjectOptions struct {
124 ForceDelete bool
125 GovernanceBypass bool
126 VersionID string
127 Internal AdvancedRemoveOptions
128}
129
130// RemoveObject removes an object from a bucket.
131func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
132 // Input validation.
133 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
134 return err
135 }
136 if err := s3utils.CheckValidObjectName(objectName); err != nil {
137 return err
138 }
139
140 res := c.removeObject(ctx, bucketName, objectName, opts)
141 return res.Err
142}
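// Minimal usage sketch for RemoveObject (hypothetical names; the
// GovernanceBypass flag only matters under object locking):
//
//	err := client.RemoveObject(ctx, "mybucket", "old/report.csv",
//		minio.RemoveObjectOptions{GovernanceBypass: true})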
143
144func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult {
145 // Get resources properly escaped and lined up before
146 // using them in http request.
147 urlValues := make(url.Values)
148
149 if opts.VersionID != "" {
150 urlValues.Set("versionId", opts.VersionID)
151 }
152
153 // Build headers.
154 headers := make(http.Header)
155
156 if opts.GovernanceBypass {
157 // Set the bypass governance retention header.
158 headers.Set(amzBypassGovernance, "true")
159 }
160 if opts.Internal.ReplicationDeleteMarker {
161 headers.Set(minIOBucketReplicationDeleteMarker, "true")
162 }
163 if !opts.Internal.ReplicationMTime.IsZero() {
164 headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
165 }
166 if !opts.Internal.ReplicationStatus.Empty() {
167 headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
168 }
169 if opts.Internal.ReplicationRequest {
170 headers.Set(minIOBucketReplicationRequest, "true")
171 }
172 if opts.Internal.ReplicationValidityCheck {
173 headers.Set(minIOBucketReplicationCheck, "true")
174 }
175 if opts.ForceDelete {
176 headers.Set(minIOForceDelete, "true")
177 }
178 // Execute DELETE on objectName.
179 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
180 bucketName: bucketName,
181 objectName: objectName,
182 contentSHA256Hex: emptySHA256Hex,
183 queryValues: urlValues,
184 customHeader: headers,
185 })
186 defer closeResponse(resp)
187 if err != nil {
188 return RemoveObjectResult{Err: err}
189 }
190 if resp != nil {
191 // if some unexpected error happened and max retry is reached, we want to let client know
192 if resp.StatusCode != http.StatusNoContent {
193 err := httpRespToErrorResponse(resp, bucketName, objectName)
194 return RemoveObjectResult{Err: err}
195 }
196 }
197
198 // DeleteObject always responds with http '204' even for
199 // objects which do not exist. So no need to handle them
200 // specifically.
201 return RemoveObjectResult{
202 ObjectName: objectName,
203 ObjectVersionID: opts.VersionID,
204 DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true",
205 DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"),
206 }
207}
208
209// RemoveObjectError - container for a Multi Delete S3 API error
210type RemoveObjectError struct {
211 ObjectName string
212 VersionID string
213 Err error
214}
215
216// RemoveObjectResult - container for a Multi Delete S3 API result
217type RemoveObjectResult struct {
218 ObjectName string
219 ObjectVersionID string
220
221 DeleteMarker bool
222 DeleteMarkerVersionID string
223
224 Err error
225}
226
227// generateRemoveMultiObjectsRequest - generates the XML body for a Multi-Object Delete request
228func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
229 delObjects := []deleteObject{}
230 for _, obj := range objects {
231 delObjects = append(delObjects, deleteObject{
232 Key: obj.Key,
233 VersionID: obj.VersionID,
234 })
235 }
236 xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false})
237 return xmlBytes
238}
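
To make the generated body concrete, here is a hand-written illustration (it would have to live inside package minio, since the helper and its types are unexported; the XML shown is reconstructed from the struct tags above, not captured traffic). Note that Quiet is always emitted as false by this helper:

// Package-internal sketch. For two entries the marshaled body is,
// modulo whitespace:
//
//	<Delete>
//	  <Quiet>false</Quiet>
//	  <Object><Key>a.txt</Key></Object>
//	  <Object><Key>b.txt</Key><VersionId>v1</VersionId></Object>
//	</Delete>
func exampleRemoveRequestBody() []byte {
	return generateRemoveMultiObjectsRequest([]ObjectInfo{
		{Key: "a.txt"},
		{Key: "b.txt", VersionID: "v1"},
	})
}
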
239
240// processRemoveMultiObjectsResponse - parses the Multi-Object Delete response
241// and returns the success/failure status for each object
242func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) {
243 // Parse multi delete XML response
244 rmResult := &deleteMultiObjectsResult{}
245 err := xmlDecoder(body, rmResult)
246 if err != nil {
247 resultCh <- RemoveObjectResult{ObjectName: "", Err: err}
248 return
249 }
250
251 // Fill deletion that returned success
252 for _, obj := range rmResult.DeletedObjects {
253 resultCh <- RemoveObjectResult{
254 ObjectName: obj.Key,
255 // Only filled with versioned buckets
256 ObjectVersionID: obj.VersionID,
257 DeleteMarker: obj.DeleteMarker,
258 DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
259 }
260 }
261
262 // Fill deletion that returned an error.
263 for _, obj := range rmResult.UnDeletedObjects {
264 // A missing version is not an error; ignore it and continue.
265 switch obj.Code {
266 case "InvalidArgument", "NoSuchVersion":
267 continue
268 }
269 resultCh <- RemoveObjectResult{
270 ObjectName: obj.Key,
271 ObjectVersionID: obj.VersionID,
272 Err: ErrorResponse{
273 Code: obj.Code,
274 Message: obj.Message,
275 },
276 }
277 }
278}
279
280// RemoveObjectsOptions represents options specified by user for RemoveObjects call
281type RemoveObjectsOptions struct {
282 GovernanceBypass bool
283}
284
285// RemoveObjects removes multiple objects from a bucket. Object versions
286// may be specified via the entries received from objectsCh. Removal
287// failures are sent back via the returned error channel.
288func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
289 errorCh := make(chan RemoveObjectError, 1)
290
291 // Validate if bucket name is valid.
292 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
293 defer close(errorCh)
294 errorCh <- RemoveObjectError{
295 Err: err,
296 }
297 return errorCh
298 }
299 // Validate objects channel to be properly allocated.
300 if objectsCh == nil {
301 defer close(errorCh)
302 errorCh <- RemoveObjectError{
303 Err: errInvalidArgument("Objects channel cannot be nil"),
304 }
305 return errorCh
306 }
307
308 resultCh := make(chan RemoveObjectResult, 1)
309 go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
310 go func() {
311 defer close(errorCh)
312 for res := range resultCh {
313 // Send only errors to the error channel
314 if res.Err == nil {
315 continue
316 }
317 errorCh <- RemoveObjectError{
318 ObjectName: res.ObjectName,
319 VersionID: res.ObjectVersionID,
320 Err: res.Err,
321 }
322 }
323 }()
324
325 return errorCh
326}
327
328// RemoveObjectsWithResult removes multiple objects from a bucket. Object
329// versions may be specified via the entries received from objectsCh.
330// Removal results, successes and failures alike, are sent back via the
331// returned RemoveObjectResult channel.
332func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult {
333 resultCh := make(chan RemoveObjectResult, 1)
334
335 // Validate if bucket name is valid.
336 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
337 defer close(resultCh)
338 resultCh <- RemoveObjectResult{
339 Err: err,
340 }
341 return resultCh
342 }
343 // Validate objects channel to be properly allocated.
344 if objectsCh == nil {
345 defer close(resultCh)
346 resultCh <- RemoveObjectResult{
347 Err: errInvalidArgument("Objects channel cannot be nil"),
348 }
349 return resultCh
350 }
351
352 go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
353 return resultCh
354}
355
356// Return true if the character is within the allowed characters in an XML 1.0 document
357// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets
358func validXMLChar(r rune) (ok bool) {
359 return r == 0x09 ||
360 r == 0x0A ||
361 r == 0x0D ||
362 r >= 0x20 && r <= 0xD7FF ||
363 r >= 0xE000 && r <= 0xFFFD ||
364 r >= 0x10000 && r <= 0x10FFFF
365}
366
367func hasInvalidXMLChar(str string) bool {
368 for _, s := range str {
369 if !validXMLChar(s) {
370 return true
371 }
372 }
373 return false
374}
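
A tiny illustration of why this guard exists (invented keys, and package-internal since the helper is unexported): a raw control byte such as 0x01 cannot be represented in an XML 1.0 document at all, so such keys are routed through the single-DELETE path in removeObjects below, where the key travels in the request URL instead of the XML body:

// Package-internal sketch of the XML 1.0 guard (assumes fmt is imported).
func xmlGuardExample() {
	fmt.Println(hasInvalidXMLChar("report\x01.csv"))  // true: 0x01 is not valid in XML 1.0
	fmt.Println(hasInvalidXMLChar("report-2023.csv")) // false: plain ASCII is fine
}
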
375
376// Generate and call MultiDelete S3 requests based on entries received from objectsCh
377func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
378 maxEntries := 1000
379 finish := false
380 urlValues := make(url.Values)
381 urlValues.Set("delete", "")
382
383 // Close result channel when Multi delete finishes.
384 defer close(resultCh)
385
386 // Loop over entries by 1000 and call MultiDelete requests
387 for {
388 if finish {
389 break
390 }
391 count := 0
392 var batch []ObjectInfo
393
394 // Try to gather 1000 entries
395 for object := range objectsCh {
396 if hasInvalidXMLChar(object.Key) {
397 // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
398 removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
399 VersionID: object.VersionID,
400 GovernanceBypass: opts.GovernanceBypass,
401 })
402 if err := removeResult.Err; err != nil {
403 // A missing version is not an error; ignore it and continue.
404 switch ToErrorResponse(err).Code {
405 case "InvalidArgument", "NoSuchVersion":
406 continue
407 }
408 // Fall through: the error result is sent exactly once below.
409 }
410 
411 resultCh <- removeResult
412 continue
413 }
414
415 batch = append(batch, object)
416 if count++; count >= maxEntries {
417 break
418 }
419 }
420 if count == 0 {
421 // The Multi-Object Delete API doesn't accept an empty object list; quit immediately
422 break
423 }
424 if count < maxEntries {
425 // We didn't have 1000 entries, so this is the last batch
426 finish = true
427 }
428
429 // Build headers.
430 headers := make(http.Header)
431 if opts.GovernanceBypass {
432 // Set the bypass governance retention header
433 headers.Set(amzBypassGovernance, "true")
434 }
435
436 // Generate remove multi objects XML request
437 removeBytes := generateRemoveMultiObjectsRequest(batch)
438 // Execute POST on bucket to remove the batch of objects.
439 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
440 bucketName: bucketName,
441 queryValues: urlValues,
442 contentBody: bytes.NewReader(removeBytes),
443 contentLength: int64(len(removeBytes)),
444 contentMD5Base64: sumMD5Base64(removeBytes),
445 contentSHA256Hex: sum256Hex(removeBytes),
446 customHeader: headers,
447 })
448 if resp != nil {
449 if resp.StatusCode != http.StatusOK {
450 e := httpRespToErrorResponse(resp, bucketName, "")
451 resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
452 }
453 }
454 if err != nil {
455 for _, b := range batch {
456 resultCh <- RemoveObjectResult{
457 ObjectName: b.Key,
458 ObjectVersionID: b.VersionID,
459 Err: err,
460 }
461 }
462 continue
463 }
464
465 // Process multiobjects remove xml response
466 processRemoveMultiObjectsResponse(resp.Body, resultCh)
467
468 closeResponse(resp)
469 }
470}
471
472// RemoveIncompleteUpload aborts a partially uploaded object.
473func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
474 // Input validation.
475 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
476 return err
477 }
478 if err := s3utils.CheckValidObjectName(objectName); err != nil {
479 return err
480 }
481 // Find multipart upload ids of the object to be aborted.
482 uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
483 if err != nil {
484 return err
485 }
486
487 for _, uploadID := range uploadIDs {
488 // abort incomplete multipart upload, based on the upload id passed.
489 err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
490 if err != nil {
491 return err
492 }
493 }
494
495 return nil
496}
497
498// abortMultipartUpload aborts a multipart upload for the given
499// uploadID; all previously uploaded parts are deleted.
500func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
501 // Input validation.
502 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
503 return err
504 }
505 if err := s3utils.CheckValidObjectName(objectName); err != nil {
506 return err
507 }
508
509 // Initialize url queries.
510 urlValues := make(url.Values)
511 urlValues.Set("uploadId", uploadID)
512
513 // Execute DELETE on multipart upload.
514 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
515 bucketName: bucketName,
516 objectName: objectName,
517 queryValues: urlValues,
518 contentSHA256Hex: emptySHA256Hex,
519 })
520 defer closeResponse(resp)
521 if err != nil {
522 return err
523 }
524 if resp != nil {
525 if resp.StatusCode != http.StatusNoContent {
526 // Abort has no response body, handle it for any errors.
527 var errorResponse ErrorResponse
528 switch resp.StatusCode {
529 case http.StatusNotFound:
530 // This is needed specifically for abort and cannot
531 // be folded into the default case.
532 errorResponse = ErrorResponse{
533 Code: "NoSuchUpload",
534 Message: "The specified multipart upload does not exist.",
535 BucketName: bucketName,
536 Key: objectName,
537 RequestID: resp.Header.Get("x-amz-request-id"),
538 HostID: resp.Header.Get("x-amz-id-2"),
539 Region: resp.Header.Get("x-amz-bucket-region"),
540 }
541 default:
542 return httpRespToErrorResponse(resp, bucketName, objectName)
543 }
544 return errorResponse
545 }
546 }
547 return nil
548}
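
End to end, the removal APIs in this file compose as in the following caller-side sketch. All names are placeholders, error handling is abbreviated, and client is assumed to be an already-constructed *minio.Client:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

func removeExamples(ctx context.Context, client *minio.Client) {
	// Single delete of one version, bypassing governance retention.
	err := client.RemoveObject(ctx, "my-bucket", "my-object", minio.RemoveObjectOptions{
		GovernanceBypass: true,
		VersionID:        "example-version-id", // placeholder
	})
	if err != nil {
		log.Println("remove failed:", err)
	}

	// Multi-delete: entries fed into objectsCh are batched by the client
	// into MultiDelete requests of up to 1000 objects each.
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for _, key := range []string{"a.txt", "b.txt", "c.txt"} {
			objectsCh <- minio.ObjectInfo{Key: key}
		}
	}()
	// Drain the error channel; only failures are reported here.
	for rmErr := range client.RemoveObjects(ctx, "my-bucket", objectsCh, minio.RemoveObjectsOptions{}) {
		log.Println("failed to remove", rmErr.ObjectName, rmErr.Err)
	}
}
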
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go
new file mode 100644
index 0000000..9ec8f4f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-restore.go
@@ -0,0 +1,182 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * (C) 2018-2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25 "net/url"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/tags"
29)
30
31// RestoreType represents the restore request type
32type RestoreType string
33
34const (
35 // RestoreSelect represents the restore SELECT operation
36 RestoreSelect = RestoreType("SELECT")
37)
38
39// TierType represents a retrieval tier
40type TierType string
41
42const (
43 // TierStandard is the standard retrieval tier
44 TierStandard = TierType("Standard")
45 // TierBulk is the bulk retrieval tier
46 TierBulk = TierType("Bulk")
47 // TierExpedited is the expedited retrieval tier
48 TierExpedited = TierType("Expedited")
49)
50
51// GlacierJobParameters represents the retrieval tier parameter
52type GlacierJobParameters struct {
53 Tier TierType
54}
55
56// Encryption contains the type of server-side encryption used during object retrieval
57type Encryption struct {
58 EncryptionType string
59 KMSContext string
60 KMSKeyID string `xml:"KMSKeyId"`
61}
62
63// MetadataEntry represents a metadata key-value pair of the restored object.
64type MetadataEntry struct {
65 Name string
66 Value string
67}
68
69// S3 holds properties of the copy of the archived object
70type S3 struct {
71 AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
72 BucketName string
73 Prefix string
74 CannedACL *string `xml:"CannedACL,omitempty"`
75 Encryption *Encryption `xml:"Encryption,omitempty"`
76 StorageClass *string `xml:"StorageClass,omitempty"`
77 Tagging *tags.Tags `xml:"Tagging,omitempty"`
78 UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"`
79}
80
81// SelectParameters holds the select request parameters
82type SelectParameters struct {
83 XMLName xml.Name `xml:"SelectParameters"`
84 ExpressionType QueryExpressionType
85 Expression string
86 InputSerialization SelectObjectInputSerialization
87 OutputSerialization SelectObjectOutputSerialization
88}
89
90// OutputLocation holds properties of the copy of the archived object
91type OutputLocation struct {
92 XMLName xml.Name `xml:"OutputLocation"`
93 S3 S3 `xml:"S3"`
94}
95
96// RestoreRequest holds properties of the restore object request
97type RestoreRequest struct {
98 XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
99 Type *RestoreType `xml:"Type,omitempty"`
100 Tier *TierType `xml:"Tier,omitempty"`
101 Days *int `xml:"Days,omitempty"`
102 GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
103 Description *string `xml:"Description,omitempty"`
104 SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
105 OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"`
106}
107
108// SetDays sets the days parameter of the restore request
109func (r *RestoreRequest) SetDays(v int) {
110 r.Days = &v
111}
112
113// SetGlacierJobParameters sets the GlacierJobParameters of the restore request
114func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
115 r.GlacierJobParameters = &v
116}
117
118// SetType sets the type of the restore request
119func (r *RestoreRequest) SetType(v RestoreType) {
120 r.Type = &v
121}
122
123// SetTier sets the retrieval tier of the restore request
124func (r *RestoreRequest) SetTier(v TierType) {
125 r.Tier = &v
126}
127
128// SetDescription sets the description of the restore request
129func (r *RestoreRequest) SetDescription(v string) {
130 r.Description = &v
131}
132
133// SetSelectParameters sets SelectParameters of the restore select request
134func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
135 r.SelectParameters = &v
136}
137
138// SetOutputLocation sets the properties of the copy of the archived object
139func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
140 r.OutputLocation = &v
141}
142
143// RestoreObject is an implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
144func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
145 // Input validation.
146 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
147 return err
148 }
149 if err := s3utils.CheckValidObjectName(objectName); err != nil {
150 return err
151 }
152
153 restoreRequestBytes, err := xml.Marshal(req)
154 if err != nil {
155 return err
156 }
157
158 urlValues := make(url.Values)
159 urlValues.Set("restore", "")
160 if versionID != "" {
161 urlValues.Set("versionId", versionID)
162 }
163
164 // Execute POST on bucket/object.
165 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
166 bucketName: bucketName,
167 objectName: objectName,
168 queryValues: urlValues,
169 contentMD5Base64: sumMD5Base64(restoreRequestBytes),
170 contentSHA256Hex: sum256Hex(restoreRequestBytes),
171 contentBody: bytes.NewReader(restoreRequestBytes),
172 contentLength: int64(len(restoreRequestBytes)),
173 })
174 defer closeResponse(resp)
175 if err != nil {
176 return err
177 }
178 if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
179 return httpRespToErrorResponse(resp, bucketName, "")
180 }
181 return nil
182}
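
A caller-side sketch of the restore flow defined above (placeholder names; the 30-day window and the Standard tier are illustrative, not prescriptive): fill a RestoreRequest via its setters, then post it with RestoreObject. An empty versionID targets the latest version:

package main

import (
	"context"

	"github.com/minio/minio-go/v7"
)

func restoreExample(ctx context.Context, client *minio.Client) error {
	var req minio.RestoreRequest
	req.SetDays(30) // keep the restored copy available for 30 days
	req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})

	// An empty versionID restores the latest version of the object.
	return client.RestoreObject(ctx, "my-bucket", "archived-object", "", req)
}
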
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
new file mode 100644
index 0000000..1527b74
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -0,0 +1,390 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/xml"
22 "errors"
23 "io"
24 "reflect"
25 "time"
26)
27
28// listAllMyBucketsResult container for listBuckets response.
29type listAllMyBucketsResult struct {
30 // Container for one or more buckets.
31 Buckets struct {
32 Bucket []BucketInfo
33 }
34 Owner owner
35}
36
37// owner container for bucket owner information.
38type owner struct {
39 DisplayName string
40 ID string
41}
42
43// CommonPrefix container for prefix response.
44type CommonPrefix struct {
45 Prefix string
46}
47
48// ListBucketV2Result container for listObjects response version 2.
49type ListBucketV2Result struct {
50 // A response can contain CommonPrefixes only if you have
51 // specified a delimiter.
52 CommonPrefixes []CommonPrefix
53 // Metadata about each object returned.
54 Contents []ObjectInfo
55 Delimiter string
56
57 // Encoding type used to encode object keys in the response.
58 EncodingType string
59
60 // A flag that indicates whether or not ListObjects returned all of the results
61 // that satisfied the search criteria.
62 IsTruncated bool
63 MaxKeys int64
64 Name string
65
66 // Holds the token that will be sent in the next request to fetch the next group of keys
67 NextContinuationToken string
68
69 ContinuationToken string
70 Prefix string
71
72 // FetchOwner and StartAfter are currently not used
73 FetchOwner string
74 StartAfter string
75}
76
77// Version is an element in the list object versions response
78type Version struct {
79 ETag string
80 IsLatest bool
81 Key string
82 LastModified time.Time
83 Owner Owner
84 Size int64
85 StorageClass string
86 VersionID string `xml:"VersionId"`
87
88 // x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, containing the first value.
89 // Only returned by MinIO servers.
90 UserMetadata StringMap `json:"userMetadata,omitempty"`
91
92 // x-amz-tagging values as key/value pairs.
93 // Only returned by MinIO servers.
94 UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
95
96 Internal *struct {
97 K int // Data blocks
98 M int // Parity blocks
99 } `xml:"Internal"`
100
101 isDeleteMarker bool
102}
103
104// ListVersionsResult is an element in the list object versions response
105// and has a special Unmarshaler because we need to preserve the order
106// of <Version> and <DeleteMarker> in ListVersionsResult.Versions slice
107type ListVersionsResult struct {
108 Versions []Version
109
110 CommonPrefixes []CommonPrefix
111 Name string
112 Prefix string
113 Delimiter string
114 MaxKeys int64
115 EncodingType string
116 IsTruncated bool
117 KeyMarker string
118 VersionIDMarker string
119 NextKeyMarker string
120 NextVersionIDMarker string
121}
122
123// UnmarshalXML is custom unmarshal code for the ListObjectVersions response; it
124// unmarshals <Version> and <DeleteMarker> tags and saves them in the Versions field to
125// preserve the lexical order of the listing.
126func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
127 for {
128 // Read tokens from the XML document in a stream.
129 t, err := d.Token()
130 if err != nil {
131 if err == io.EOF {
132 break
133 }
134 return err
135 }
136
137 se, ok := t.(xml.StartElement)
138 if ok {
139 tagName := se.Name.Local
140 switch tagName {
141 case "Name", "Prefix",
142 "Delimiter", "EncodingType",
143 "KeyMarker", "NextKeyMarker":
144 var s string
145 if err = d.DecodeElement(&s, &se); err != nil {
146 return err
147 }
148 v := reflect.ValueOf(l).Elem().FieldByName(tagName)
149 if v.IsValid() {
150 v.SetString(s)
151 }
152 case "VersionIdMarker":
153 // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
154 var s string
155 if err = d.DecodeElement(&s, &se); err != nil {
156 return err
157 }
158 l.VersionIDMarker = s
159 case "NextVersionIdMarker":
160 // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
161 var s string
162 if err = d.DecodeElement(&s, &se); err != nil {
163 return err
164 }
165 l.NextVersionIDMarker = s
166 case "IsTruncated": // bool
167 var b bool
168 if err = d.DecodeElement(&b, &se); err != nil {
169 return err
170 }
171 l.IsTruncated = b
172 case "MaxKeys": // int64
173 var i int64
174 if err = d.DecodeElement(&i, &se); err != nil {
175 return err
176 }
177 l.MaxKeys = i
178 case "CommonPrefixes":
179 var cp CommonPrefix
180 if err = d.DecodeElement(&cp, &se); err != nil {
181 return err
182 }
183 l.CommonPrefixes = append(l.CommonPrefixes, cp)
184 case "DeleteMarker", "Version":
185 var v Version
186 if err = d.DecodeElement(&v, &se); err != nil {
187 return err
188 }
189 if tagName == "DeleteMarker" {
190 v.isDeleteMarker = true
191 }
192 l.Versions = append(l.Versions, v)
193 default:
193 return errors.New("unrecognized option: " + tagName)
195 }
196
197 }
198 }
199 return nil
200}
201
202// ListBucketResult container for listObjects response.
203type ListBucketResult struct {
204 // A response can contain CommonPrefixes only if you have
205 // specified a delimiter.
206 CommonPrefixes []CommonPrefix
207 // Metadata about each object returned.
208 Contents []ObjectInfo
209 Delimiter string
210
211 // Encoding type used to encode object keys in the response.
212 EncodingType string
213
214 // A flag that indicates whether or not ListObjects returned all of the results
215 // that satisfied the search criteria.
216 IsTruncated bool
217 Marker string
218 MaxKeys int64
219 Name string
220
221 // When the response is truncated (the IsTruncated element value in
222 // the response is true), you can use the key name in this field
223 // as a marker in the subsequent request to get the next set of objects.
224 // Object storage lists objects in alphabetical order. Note: this
225 // element is returned only if you have the delimiter request
226 // parameter specified. If the response does not include NextMarker
227 // and it is truncated, you can use the value of the last Key in
228 // the response as the marker in the subsequent request to get the
229 // next set of object keys.
230 NextMarker string
231 Prefix string
232}
233
234// ListMultipartUploadsResult container for ListMultipartUploads response
235type ListMultipartUploadsResult struct {
236 Bucket string
237 KeyMarker string
238 UploadIDMarker string `xml:"UploadIdMarker"`
239 NextKeyMarker string
240 NextUploadIDMarker string `xml:"NextUploadIdMarker"`
241 EncodingType string
242 MaxUploads int64
243 IsTruncated bool
244 Uploads []ObjectMultipartInfo `xml:"Upload"`
245 Prefix string
246 Delimiter string
247 // A response can contain CommonPrefixes only if you specify a delimiter.
248 CommonPrefixes []CommonPrefix
249}
250
251// initiator container for who initiated multipart upload.
252type initiator struct {
253 ID string
254 DisplayName string
255}
256
257// copyObjectResult container for copy object response.
258type copyObjectResult struct {
259 ETag string
260 LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
261}
262
263// ObjectPart container for particular part of an object.
264type ObjectPart struct {
265 // Part number identifies the part.
266 PartNumber int
267
268 // Date and time the part was uploaded.
269 LastModified time.Time
270
271 // Entity tag returned when the part was uploaded, usually md5sum
272 // of the part.
273 ETag string
274
275 // Size of the uploaded part data.
276 Size int64
277
278 // Checksum values of each part.
279 ChecksumCRC32 string
280 ChecksumCRC32C string
281 ChecksumSHA1 string
282 ChecksumSHA256 string
283}
284
285// ListObjectPartsResult container for ListObjectParts response.
286type ListObjectPartsResult struct {
287 Bucket string
288 Key string
289 UploadID string `xml:"UploadId"`
290
291 Initiator initiator
292 Owner owner
293
294 StorageClass string
295 PartNumberMarker int
296 NextPartNumberMarker int
297 MaxParts int
298
299 // Indicates whether the returned list of parts is truncated.
300 IsTruncated bool
301 ObjectParts []ObjectPart `xml:"Part"`
302
303 EncodingType string
304}
305
306// initiateMultipartUploadResult container for InitiateMultiPartUpload
307// response.
308type initiateMultipartUploadResult struct {
309 Bucket string
310 Key string
311 UploadID string `xml:"UploadId"`
312}
313
314// completeMultipartUploadResult container for completed multipart
315// upload response.
316type completeMultipartUploadResult struct {
317 Location string
318 Bucket string
319 Key string
320 ETag string
321
322 // Checksum values, hash of hashes of parts.
323 ChecksumCRC32 string
324 ChecksumCRC32C string
325 ChecksumSHA1 string
326 ChecksumSHA256 string
327}
328
329// CompletePart sub container lists individual part numbers and their
330// md5sum, part of completeMultipartUpload.
331type CompletePart struct {
332 // Part number identifies the part.
333 PartNumber int
334 ETag string
335
336 // Checksum values
337 ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
338 ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
339 ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
340 ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
341}
342
343// completeMultipartUpload container for completing multipart upload.
344type completeMultipartUpload struct {
345 XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
346 Parts []CompletePart `xml:"Part"`
347}
348
349// createBucketConfiguration container for bucket configuration.
350type createBucketConfiguration struct {
351 XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
352 Location string `xml:"LocationConstraint"`
353}
354
355// deleteObject container for Delete element in MultiObjects Delete XML request
356type deleteObject struct {
357 Key string
358 VersionID string `xml:"VersionId,omitempty"`
359}
360
361// deletedObject container for Deleted element in MultiObjects Delete XML response
362type deletedObject struct {
363 Key string
364 VersionID string `xml:"VersionId,omitempty"`
365 // These fields are set when the deleted entry was a delete marker.
366 DeleteMarker bool
367 DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
368}
369
370// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
371type nonDeletedObject struct {
372 Key string
373 Code string
374 Message string
375 VersionID string `xml:"VersionId"`
376}
377
378// deleteMultiObjects container for MultiObjects Delete XML request
379type deleteMultiObjects struct {
380 XMLName xml.Name `xml:"Delete"`
381 Quiet bool
382 Objects []deleteObject `xml:"Object"`
383}
384
385// deleteMultiObjectsResult container for MultiObjects Delete XML response
386type deleteMultiObjectsResult struct {
387 XMLName xml.Name `xml:"DeleteResult"`
388 DeletedObjects []deletedObject `xml:"Deleted"`
389 UnDeletedObjects []nonDeletedObject `xml:"Error"`
390}
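
The custom ListVersionsResult.UnmarshalXML defined earlier in this file is easiest to see with a hand-written document (not a real server response): interleaved <Version> and <DeleteMarker> elements land, in document order, in the single Versions slice:

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7"
)

func decodeListingExample() error {
	const doc = `<ListVersionsResult>
  <Name>my-bucket</Name>
  <Version><Key>a.txt</Key><VersionId>v2</VersionId></Version>
  <DeleteMarker><Key>a.txt</Key><VersionId>v1</VersionId></DeleteMarker>
  <IsTruncated>false</IsTruncated>
</ListVersionsResult>`

	var l minio.ListVersionsResult
	if err := xml.Unmarshal([]byte(doc), &l); err != nil {
		return err
	}
	// Both entries, version and delete marker, in document order.
	fmt.Println(len(l.Versions), l.Versions[0].VersionID, l.Versions[1].VersionID) // 2 v2 v1
	return nil
}
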
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
new file mode 100644
index 0000000..628d967
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -0,0 +1,757 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * (C) 2018-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/binary"
24 "encoding/xml"
25 "errors"
26 "fmt"
27 "hash"
28 "hash/crc32"
29 "io"
30 "net/http"
31 "net/url"
32 "strings"
33
34 "github.com/minio/minio-go/v7/pkg/encrypt"
35 "github.com/minio/minio-go/v7/pkg/s3utils"
36)
37
38// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
39type CSVFileHeaderInfo string
40
41// Constants for file header info.
42const (
43 CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
44 CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
45 CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
46)
47
48// SelectCompressionType - is the parameter for what type of compression is
49// present
50type SelectCompressionType string
51
52// Constants for compression types under select API.
53const (
54 SelectCompressionNONE SelectCompressionType = "NONE"
55 SelectCompressionGZIP SelectCompressionType = "GZIP"
56 SelectCompressionBZIP SelectCompressionType = "BZIP2"
57
58 // Non-standard compression schemes, supported by MinIO hosts:
59
60 SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
61 SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
62 SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
63 SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
64)
65
66// CSVQuoteFields - is the parameter for how CSV fields are quoted.
67type CSVQuoteFields string
68
69// Constants for csv quote styles.
70const (
71 CSVQuoteFieldsAlways CSVQuoteFields = "Always"
72 CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
73)
74
75// QueryExpressionType - is the syntax type of the expression; this should only
76// be SQL
77type QueryExpressionType string
78
79// Constants for expression type.
80const (
81 QueryExpressionTypeSQL QueryExpressionType = "SQL"
82)
83
84// JSONType determines json input serialization type.
85type JSONType string
86
87// Constants for JSONTypes.
88const (
89 JSONDocumentType JSONType = "DOCUMENT"
90 JSONLinesType JSONType = "LINES"
91)
92
93// ParquetInputOptions parquet input specific options
94type ParquetInputOptions struct{}
95
96// CSVInputOptions csv input specific options
97type CSVInputOptions struct {
98 FileHeaderInfo CSVFileHeaderInfo
99 fileHeaderInfoSet bool
100
101 RecordDelimiter string
102 recordDelimiterSet bool
103
104 FieldDelimiter string
105 fieldDelimiterSet bool
106
107 QuoteCharacter string
108 quoteCharacterSet bool
109
110 QuoteEscapeCharacter string
111 quoteEscapeCharacterSet bool
112
113 Comments string
114 commentsSet bool
115}
116
117// SetFileHeaderInfo sets the file header info in the CSV input options
118func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) {
119 c.FileHeaderInfo = val
120 c.fileHeaderInfoSet = true
121}
122
123// SetRecordDelimiter sets the record delimiter in the CSV input options
124func (c *CSVInputOptions) SetRecordDelimiter(val string) {
125 c.RecordDelimiter = val
126 c.recordDelimiterSet = true
127}
128
129// SetFieldDelimiter sets the field delimiter in the CSV input options
130func (c *CSVInputOptions) SetFieldDelimiter(val string) {
131 c.FieldDelimiter = val
132 c.fieldDelimiterSet = true
133}
134
135// SetQuoteCharacter sets the quote character in the CSV input options
136func (c *CSVInputOptions) SetQuoteCharacter(val string) {
137 c.QuoteCharacter = val
138 c.quoteCharacterSet = true
139}
140
141// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options
142func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) {
143 c.QuoteEscapeCharacter = val
144 c.quoteEscapeCharacterSet = true
145}
146
147// SetComments sets the comments character in the CSV input options
148func (c *CSVInputOptions) SetComments(val string) {
149 c.Comments = val
150 c.commentsSet = true
151}
152
153// MarshalXML - produces the xml representation of the CSV input options struct
154func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
155 if err := e.EncodeToken(start); err != nil {
156 return err
157 }
158 if c.FileHeaderInfo != "" || c.fileHeaderInfoSet {
159 if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil {
160 return err
161 }
162 }
163
164 if c.RecordDelimiter != "" || c.recordDelimiterSet {
165 if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
166 return err
167 }
168 }
169
170 if c.FieldDelimiter != "" || c.fieldDelimiterSet {
171 if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
172 return err
173 }
174 }
175
176 if c.QuoteCharacter != "" || c.quoteCharacterSet {
177 if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
178 return err
179 }
180 }
181
182 if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
183 if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
184 return err
185 }
186 }
187
188 if c.Comments != "" || c.commentsSet {
189 if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil {
190 return err
191 }
192 }
193
194 return e.EncodeToken(xml.EndElement{Name: start.Name})
195}
196
197// CSVOutputOptions csv output specific options
198type CSVOutputOptions struct {
199 QuoteFields CSVQuoteFields
200 quoteFieldsSet bool
201
202 RecordDelimiter string
203 recordDelimiterSet bool
204
205 FieldDelimiter string
206 fieldDelimiterSet bool
207
208 QuoteCharacter string
209 quoteCharacterSet bool
210
211 QuoteEscapeCharacter string
212 quoteEscapeCharacterSet bool
213}
214
215// SetQuoteFields sets the quote field parameter in the CSV output options
216func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) {
217 c.QuoteFields = val
218 c.quoteFieldsSet = true
219}
220
221// SetRecordDelimiter sets the record delimiter character in the CSV output options
222func (c *CSVOutputOptions) SetRecordDelimiter(val string) {
223 c.RecordDelimiter = val
224 c.recordDelimiterSet = true
225}
226
227// SetFieldDelimiter sets the field delimiter character in the CSV output options
228func (c *CSVOutputOptions) SetFieldDelimiter(val string) {
229 c.FieldDelimiter = val
230 c.fieldDelimiterSet = true
231}
232
233// SetQuoteCharacter sets the quote character in the CSV output options
234func (c *CSVOutputOptions) SetQuoteCharacter(val string) {
235 c.QuoteCharacter = val
236 c.quoteCharacterSet = true
237}
238
239// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options
240func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) {
241 c.QuoteEscapeCharacter = val
242 c.quoteEscapeCharacterSet = true
243}
244
245// MarshalXML - produces the xml representation of the CSVOutputOptions struct
246func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
247 if err := e.EncodeToken(start); err != nil {
248 return err
249 }
250
251 if c.QuoteFields != "" || c.quoteFieldsSet {
252 if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil {
253 return err
254 }
255 }
256
257 if c.RecordDelimiter != "" || c.recordDelimiterSet {
258 if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
259 return err
260 }
261 }
262
263 if c.FieldDelimiter != "" || c.fieldDelimiterSet {
264 if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
265 return err
266 }
267 }
268
269 if c.QuoteCharacter != "" || c.quoteCharacterSet {
270 if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
271 return err
272 }
273 }
274
275 if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
276 if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
277 return err
278 }
279 }
280
281 return e.EncodeToken(xml.EndElement{Name: start.Name})
282}
283
284// JSONInputOptions json input specific options
285type JSONInputOptions struct {
286 Type JSONType
287 typeSet bool
288}
289
290// SetType sets the JSON type in the JSON input options
291func (j *JSONInputOptions) SetType(typ JSONType) {
292 j.Type = typ
293 j.typeSet = true
294}
295
296// MarshalXML - produces the xml representation of the JSONInputOptions struct
297func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
298 if err := e.EncodeToken(start); err != nil {
299 return err
300 }
301
302 if j.Type != "" || j.typeSet {
303 if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil {
304 return err
305 }
306 }
307
308 return e.EncodeToken(xml.EndElement{Name: start.Name})
309}
310
311// JSONOutputOptions - json output specific options
312type JSONOutputOptions struct {
313 RecordDelimiter string
314 recordDelimiterSet bool
315}
316
317// SetRecordDelimiter sets the record delimiter in the JSON output options
318func (j *JSONOutputOptions) SetRecordDelimiter(val string) {
319 j.RecordDelimiter = val
320 j.recordDelimiterSet = true
321}
322
323// MarshalXML - produces the xml representation of the JSONOutputOptions struct
324func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
325 if err := e.EncodeToken(start); err != nil {
326 return err
327 }
328
329 if j.RecordDelimiter != "" || j.recordDelimiterSet {
330 if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
331 return err
332 }
333 }
334
335 return e.EncodeToken(xml.EndElement{Name: start.Name})
336}
337
338// SelectObjectInputSerialization - input serialization parameters
339type SelectObjectInputSerialization struct {
340 CompressionType SelectCompressionType `xml:"CompressionType,omitempty"`
341 Parquet *ParquetInputOptions `xml:"Parquet,omitempty"`
342 CSV *CSVInputOptions `xml:"CSV,omitempty"`
343 JSON *JSONInputOptions `xml:"JSON,omitempty"`
344}
345
346// SelectObjectOutputSerialization - output serialization parameters.
347type SelectObjectOutputSerialization struct {
348 CSV *CSVOutputOptions `xml:"CSV,omitempty"`
349 JSON *JSONOutputOptions `xml:"JSON,omitempty"`
350}
351
352// SelectObjectOptions - represents the input select body
353type SelectObjectOptions struct {
354 XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
355 ServerSideEncryption encrypt.ServerSide `xml:"-"`
356 Expression string
357 ExpressionType QueryExpressionType
358 InputSerialization SelectObjectInputSerialization
359 OutputSerialization SelectObjectOutputSerialization
360 RequestProgress struct {
361 Enabled bool
362 }
363}
364
365// Header returns the http.Header representation of the SelectObject options.
366func (o SelectObjectOptions) Header() http.Header {
367 headers := make(http.Header)
368 if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
369 o.ServerSideEncryption.Marshal(headers)
370 }
371 return headers
372}
373
374// SelectObjectType - is the parameter which defines what type of object the
375// operation is being performed on.
376type SelectObjectType string
377
378// Constants for input data types.
379const (
380 SelectObjectTypeCSV SelectObjectType = "CSV"
381 SelectObjectTypeJSON SelectObjectType = "JSON"
382 SelectObjectTypeParquet SelectObjectType = "Parquet"
383)
384
385// preludeInfo is used for keeping track of necessary information from the
386// prelude.
387type preludeInfo struct {
388 totalLen uint32
389 headerLen uint32
390}
391
392// SelectResults is used for the streaming responses from the server.
393type SelectResults struct {
394 pipeReader *io.PipeReader
395 resp *http.Response
396 stats *StatsMessage
397 progress *ProgressMessage
398}
399
400// ProgressMessage is a struct for progress xml message.
401type ProgressMessage struct {
402 XMLName xml.Name `xml:"Progress" json:"-"`
403 StatsMessage
404}
405
406// StatsMessage is a struct for stat xml message.
407type StatsMessage struct {
408 XMLName xml.Name `xml:"Stats" json:"-"`
409 BytesScanned int64
410 BytesProcessed int64
411 BytesReturned int64
412}
413
414// messageType represents the type of message.
415type messageType string
416
417const (
418 errorMsg messageType = "error"
419 commonMsg messageType = "event"
420)
421
422// eventType represents the type of event.
423type eventType string
424
425// list of event-types returned by Select API.
426const (
427 endEvent eventType = "End"
428 recordsEvent eventType = "Records"
429 progressEvent eventType = "Progress"
430 statsEvent eventType = "Stats"
431)
432
433// contentType represents content type of event.
434type contentType string
435
436const (
437 xmlContent contentType = "text/xml"
438)
439
440// SelectObjectContent is an implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
441func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
442 // Input validation.
443 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
444 return nil, err
445 }
446 if err := s3utils.CheckValidObjectName(objectName); err != nil {
447 return nil, err
448 }
449
450 selectReqBytes, err := xml.Marshal(opts)
451 if err != nil {
452 return nil, err
453 }
454
455 urlValues := make(url.Values)
456 urlValues.Set("select", "")
457 urlValues.Set("select-type", "2")
458
459 // Execute POST on bucket/object.
460 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
461 bucketName: bucketName,
462 objectName: objectName,
463 queryValues: urlValues,
464 customHeader: opts.Header(),
465 contentMD5Base64: sumMD5Base64(selectReqBytes),
466 contentSHA256Hex: sum256Hex(selectReqBytes),
467 contentBody: bytes.NewReader(selectReqBytes),
468 contentLength: int64(len(selectReqBytes)),
469 })
470 if err != nil {
471 return nil, err
472 }
473
474 return NewSelectResults(resp, bucketName)
475}
476
477// NewSelectResults creates a Select Result parser that parses the response
478// and returns a Reader that will return parsed and assembled select output.
479func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
480 if resp.StatusCode != http.StatusOK {
481 return nil, httpRespToErrorResponse(resp, bucketName, "")
482 }
483
484 pipeReader, pipeWriter := io.Pipe()
485 streamer := &SelectResults{
486 resp: resp,
487 stats: &StatsMessage{},
488 progress: &ProgressMessage{},
489 pipeReader: pipeReader,
490 }
491 streamer.start(pipeWriter)
492 return streamer, nil
493}
494
495// Close - closes the underlying response body and the stream reader.
496func (s *SelectResults) Close() error {
497 defer closeResponse(s.resp)
498 return s.pipeReader.Close()
499}
500
501// Read - is a reader compatible implementation for SelectObjectContent records.
502func (s *SelectResults) Read(b []byte) (n int, err error) {
503 return s.pipeReader.Read(b)
504}
505
506// Stats - information about a request's stats when processing is complete.
507func (s *SelectResults) Stats() *StatsMessage {
508 return s.stats
509}
510
511// Progress - information about the progress of a request.
512func (s *SelectResults) Progress() *ProgressMessage {
513 return s.progress
514}
515
516// start is the main function that decodes the event-stream response into
517// individual events and forwards them through the pipe.
518func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
519 go func() {
520 for {
521 var prelude preludeInfo
522 headers := make(http.Header)
523 var err error
524
525 // Create CRC code
526 crc := crc32.New(crc32.IEEETable)
527 crcReader := io.TeeReader(s.resp.Body, crc)
528
529 // Extract the prelude (12 bytes) into a struct to extract relevant information.
530 prelude, err = processPrelude(crcReader, crc)
531 if err != nil {
532 pipeWriter.CloseWithError(err)
533 closeResponse(s.resp)
534 return
535 }
536
537 // Extract the headers (variable length) into a struct to extract relevant information
538 if prelude.headerLen > 0 {
539 if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
540 pipeWriter.CloseWithError(err)
541 closeResponse(s.resp)
542 return
543 }
544 }
545
546 // Get the actual payload length so that the appropriate amount of
547 // bytes can be read or parsed.
548 payloadLen := prelude.PayloadLen()
549
550 m := messageType(headers.Get("message-type"))
551
552 switch m {
553 case errorMsg:
554 pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\""))
555 closeResponse(s.resp)
556 return
557 case commonMsg:
558 // Get content-type of the payload.
559 c := contentType(headers.Get("content-type"))
560
561 // Get event type of the payload.
562 e := eventType(headers.Get("event-type"))
563
564 // Handle all supported events.
565 switch e {
566 case endEvent:
567 pipeWriter.Close()
568 closeResponse(s.resp)
569 return
570 case recordsEvent:
571 if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
572 pipeWriter.CloseWithError(err)
573 closeResponse(s.resp)
574 return
575 }
576 case progressEvent:
577 switch c {
578 case xmlContent:
579 if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
580 pipeWriter.CloseWithError(err)
581 closeResponse(s.resp)
582 return
583 }
584 default:
585 pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
586 closeResponse(s.resp)
587 return
588 }
589 case statsEvent:
590 switch c {
591 case xmlContent:
592 if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
593 pipeWriter.CloseWithError(err)
594 closeResponse(s.resp)
595 return
596 }
597 default:
598 pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
599 closeResponse(s.resp)
600 return
601 }
602 }
603 }
604
605 // Ensures that the full message's CRC is correct and
606 // that the message is not corrupted
607 if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
608 pipeWriter.CloseWithError(err)
609 closeResponse(s.resp)
610 return
611 }
612
613 }
614 }()
615}
616
617// PayloadLen calculates the payload length: the total message length minus the headers, the 12-byte prelude and the trailing 4-byte message CRC.
618func (p preludeInfo) PayloadLen() int64 {
619 return int64(p.totalLen - p.headerLen - 16)
620}
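
A concrete framing example may help here (numbers invented for illustration): each event-stream message is laid out as a 12-byte prelude (total length, header length, prelude CRC, 4 bytes each), then the headers, then the payload, then a trailing 4-byte message CRC. So for totalLen = 100 and headerLen = 20, the payload spans 100 - 20 - 16 = 64 bytes. As a package-internal sketch (preludeInfo is unexported):

// Package-internal sketch of the framing arithmetic.
func payloadLenExample() int64 {
	p := preludeInfo{totalLen: 100, headerLen: 20}
	return p.PayloadLen() // 64 = 100 - 20 - 12 (prelude) - 4 (message CRC)
}
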
621
622// processPrelude is the function that reads the 12 bytes of the prelude and
623// ensures the CRC is correct while also extracting relevant information into
624// the struct.
625func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
626 var err error
627 pInfo := preludeInfo{}
628
629 // reads total length of the message (first 4 bytes)
630 pInfo.totalLen, err = extractUint32(prelude)
631 if err != nil {
632 return pInfo, err
633 }
634
635 // reads total header length of the message (2nd 4 bytes)
636 pInfo.headerLen, err = extractUint32(prelude)
637 if err != nil {
638 return pInfo, err
639 }
640
641 // checks that the CRC is correct (3rd 4 bytes)
642 preCRC := crc.Sum32()
643 if err := checkCRC(prelude, preCRC); err != nil {
644 return pInfo, err
645 }
646
647 return pInfo, nil
648}
649
650// extracts the relevant information from the Headers.
651func extractHeader(body io.Reader, myHeaders http.Header) error {
652 for {
653 // extracts the first part of the header.
654 headerTypeName, err := extractHeaderType(body)
655 if err != nil {
656 // Since end of file, we have read all of our headers
657 if err == io.EOF {
658 break
659 }
660 return err
661 }
662
663 // reads the header value type byte (always 7, i.e. string) and ignores it.
664 extractUint8(body)
665
666 headerValueName, err := extractHeaderValue(body)
667 if err != nil {
668 return err
669 }
670
671 myHeaders.Set(headerTypeName, headerValueName)
672
673 }
674 return nil
675}
676
677// extractHeaderType extracts the first half of the header message, the header type.
678func extractHeaderType(body io.Reader) (string, error) {
679 // extracts the 1-byte length of the header name
680 headerNameLen, err := extractUint8(body)
681 if err != nil {
682 return "", err
683 }
684 // extracts the string with the appropriate number of bytes
685 headerName, err := extractString(body, int(headerNameLen))
686 if err != nil {
687 return "", err
688 }
689 return strings.TrimPrefix(headerName, ":"), nil
690}
691
692// extractHeaderValue extracts the second half of the header message, the
693// header value.
694func extractHeaderValue(body io.Reader) (string, error) {
695 bodyLen, err := extractUint16(body)
696 if err != nil {
697 return "", err
698 }
699 bodyName, err := extractString(body, int(bodyLen))
700 if err != nil {
701 return "", err
702 }
703 return bodyName, nil
704}
705
706// extracts a string of the given number of bytes from the reader.
707func extractString(source io.Reader, lenBytes int) (string, error) {
708 myVal := make([]byte, lenBytes)
709 _, err := readFull(source, myVal)
710 if err != nil {
711 return "", err
712 }
713 return string(myVal), nil
714}
715
716// extractUint32 extracts a 4 byte integer from the byte array.
717func extractUint32(r io.Reader) (uint32, error) {
718 buf := make([]byte, 4)
719 _, err := readFull(r, buf)
720 if err != nil {
721 return 0, err
722 }
723 return binary.BigEndian.Uint32(buf), nil
724}
725
726// extractUint16 extracts a 2 byte integer from the byte array.
727func extractUint16(r io.Reader) (uint16, error) {
728 buf := make([]byte, 2)
729 _, err := readFull(r, buf)
730 if err != nil {
731 return 0, err
732 }
733 return binary.BigEndian.Uint16(buf), nil
734}
735
736// extractUint8 extracts a 1 byte integer from the byte array.
737func extractUint8(r io.Reader) (uint8, error) {
738 buf := make([]byte, 1)
739 _, err := readFull(r, buf)
740 if err != nil {
741 return 0, err
742 }
743 return buf[0], nil
744}
745
746// checkCRC ensures that the CRC matches with the one from the reader.
747func checkCRC(r io.Reader, expect uint32) error {
748 msgCRC, err := extractUint32(r)
749 if err != nil {
750 return err
751 }
752
753 if msgCRC != expect {
754 return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
755 }
756 return nil
757}
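
Tying the request types and the event-stream reader together, a caller-side sketch of S3 Select over a CSV object looks roughly like this (placeholder names, error handling abbreviated); SelectResults is consumed as a plain io.Reader over the assembled Records events:

package main

import (
	"context"
	"io"
	"os"

	"github.com/minio/minio-go/v7"
)

func selectExample(ctx context.Context, client *minio.Client) error {
	opts := minio.SelectObjectOptions{
		Expression:     "SELECT * FROM S3Object",
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			CompressionType: minio.SelectCompressionNONE,
			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{RecordDelimiter: "\n"},
		},
	}

	res, err := client.SelectObjectContent(ctx, "my-bucket", "data.csv", opts)
	if err != nil {
		return err
	}
	defer res.Close()

	// Stream the records; Stats() is populated once the server sends
	// its Stats event near the end of the stream.
	_, err = io.Copy(os.Stdout, res)
	return err
}
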
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
new file mode 100644
index 0000000..b043dc4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -0,0 +1,116 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "net/http"
23
24 "github.com/minio/minio-go/v7/pkg/s3utils"
25)
26
27// BucketExists verifies if the bucket exists and you have permission to access it. Allows for a Context to
28// control cancellations and timeouts.
29func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
30 // Input validation.
31 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
32 return false, err
33 }
34
35 // Execute HEAD on bucketName.
36 resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
37 bucketName: bucketName,
38 contentSHA256Hex: emptySHA256Hex,
39 })
40 defer closeResponse(resp)
41 if err != nil {
42 if ToErrorResponse(err).Code == "NoSuchBucket" {
43 return false, nil
44 }
45 return false, err
46 }
47 if resp != nil {
48 resperr := httpRespToErrorResponse(resp, bucketName, "")
49 if ToErrorResponse(resperr).Code == "NoSuchBucket" {
50 return false, nil
51 }
52 if resp.StatusCode != http.StatusOK {
53 return false, httpRespToErrorResponse(resp, bucketName, "")
54 }
55 }
56 return true, nil
57}
58
59// StatObject verifies if the object exists and you have permission to access
60// it, and returns information about the object.
61func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
62 // Input validation.
63 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
64 return ObjectInfo{}, err
65 }
66 if err := s3utils.CheckValidObjectName(objectName); err != nil {
67 return ObjectInfo{}, err
68 }
69 headers := opts.Header()
70 if opts.Internal.ReplicationDeleteMarker {
71 headers.Set(minIOBucketReplicationDeleteMarker, "true")
72 }
73 if opts.Internal.IsReplicationReadyForDeleteMarker {
74 headers.Set(isMinioTgtReplicationReady, "true")
75 }
76
77 // Execute HEAD on objectName.
78 resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
79 bucketName: bucketName,
80 objectName: objectName,
81 queryValues: opts.toQueryValues(),
82 contentSHA256Hex: emptySHA256Hex,
83 customHeader: headers,
84 })
85 defer closeResponse(resp)
86 if err != nil {
87 return ObjectInfo{}, err
88 }
89
90 if resp != nil {
91 deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
92 replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true"
93 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
94 if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
95 errResp := ErrorResponse{
96 StatusCode: resp.StatusCode,
97 Code: "MethodNotAllowed",
98 Message: "The specified method is not allowed against this resource.",
99 BucketName: bucketName,
100 Key: objectName,
101 }
102 return ObjectInfo{
103 VersionID: resp.Header.Get(amzVersionID),
104 IsDeleteMarker: deleteMarker,
105 }, errResp
106 }
107 return ObjectInfo{
108 VersionID: resp.Header.Get(amzVersionID),
109 IsDeleteMarker: deleteMarker,
110 ReplicationReady: replicationReady, // whether delete marker can be replicated
111 }, httpRespToErrorResponse(resp, bucketName, objectName)
112 }
113 }
114
115 return ToObjectInfo(bucketName, objectName, resp.Header)
116}
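
A short caller-side sketch of the two probes in this file (placeholder names): BucketExists reports an absent bucket through the boolean rather than the error, and StatObject returns header-derived metadata without downloading the body:

package main

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

func statExample(ctx context.Context, client *minio.Client) error {
	exists, err := client.BucketExists(ctx, "my-bucket")
	if err != nil {
		return err
	}
	if !exists {
		return nil // NoSuchBucket surfaces as (false, nil), not as an error
	}

	info, err := client.StatObject(ctx, "my-bucket", "my-object", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	fmt.Println(info.Size, info.ETag, info.LastModified)
	return nil
}
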
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
new file mode 100644
index 0000000..f8a9b34
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -0,0 +1,995 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "errors"
25 "fmt"
26 "hash/crc32"
27 "io"
28 "math/rand"
29 "net"
30 "net/http"
31 "net/http/cookiejar"
32 "net/http/httptrace"
33 "net/http/httputil"
34 "net/url"
35 "os"
36 "runtime"
37 "strings"
38 "sync"
39 "sync/atomic"
40 "time"
41
42 md5simd "github.com/minio/md5-simd"
43 "github.com/minio/minio-go/v7/pkg/credentials"
44 "github.com/minio/minio-go/v7/pkg/s3utils"
45 "github.com/minio/minio-go/v7/pkg/signer"
46 "golang.org/x/net/publicsuffix"
47)
48
49// Client implements Amazon S3 compatible methods.
50type Client struct {
51 // Standard options.
52
53 // Parsed endpoint url provided by the user.
54 endpointURL *url.URL
55
56 // Holds various credential providers.
57 credsProvider *credentials.Credentials
58
59 // Custom signerType value overrides all credentials.
60 overrideSignerType credentials.SignatureType
61
62 // User supplied.
63 appInfo struct {
64 appName string
65 appVersion string
66 }
67
68 // Indicate whether we are using https or not
69 secure bool
70
71 // Needs allocation.
72 httpClient *http.Client
73 httpTrace *httptrace.ClientTrace
74 bucketLocCache *bucketLocationCache
75
76 // Advanced functionality.
77 isTraceEnabled bool
78 traceErrorsOnly bool
79 traceOutput io.Writer
80
81 // S3 specific accelerated endpoint.
82 s3AccelerateEndpoint string
83
84 // Region endpoint
85 region string
86
87 // Random seed.
88 random *rand.Rand
89
90	// lookup indicates the type of url lookup supported by the server. If not
91	// specified, defaults to Auto.
92 lookup BucketLookupType
93
94 // Factory for MD5 hash functions.
95 md5Hasher func() md5simd.Hasher
96 sha256Hasher func() md5simd.Hasher
97
98 healthStatus int32
99
100 trailingHeaderSupport bool
101}
102
103// Options for New method
104type Options struct {
105 Creds *credentials.Credentials
106 Secure bool
107 Transport http.RoundTripper
108 Trace *httptrace.ClientTrace
109 Region string
110 BucketLookup BucketLookupType
111
112 // Allows setting a custom region lookup based on URL pattern
113	// not all URL patterns are covered by this library, so if you
114	// have a custom endpoint with many regions you can use this
115	// function to perform region lookups appropriately.
116 CustomRegionViaURL func(u url.URL) string
117
118 // TrailingHeaders indicates server support of trailing headers.
119 // Only supported for v4 signatures.
120 TrailingHeaders bool
121
122 // Custom hash routines. Leave nil to use standard.
123 CustomMD5 func() md5simd.Hasher
124 CustomSHA256 func() md5simd.Hasher
125}
126
127// Global constants.
128const (
129 libraryName = "minio-go"
130 libraryVersion = "v7.0.66"
131)
132
133// User Agent should always follow the below style.
134// Please open an issue to discuss any new changes here.
135//
136// MinIO (OS; ARCH) LIB/VER APP/VER
137const (
138 libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
139 libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
140)
141
142// BucketLookupType is type of url lookup supported by server.
143type BucketLookupType int
144
145// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
146const (
147 BucketLookupAuto BucketLookupType = iota
148 BucketLookupDNS
149 BucketLookupPath
150)
151
152// New - instantiate minio client with options
153func New(endpoint string, opts *Options) (*Client, error) {
154 if opts == nil {
155 return nil, errors.New("no options provided")
156 }
157 clnt, err := privateNew(endpoint, opts)
158 if err != nil {
159 return nil, err
160 }
161 // If Amazon S3 set to signature v4.
162 if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
163 clnt.overrideSignerType = credentials.SignatureV4
164 }
165
166 return clnt, nil
167}
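// Editor's sketch of typical construction (not part of the vendored source;
// the endpoint and keys below are placeholders):
//
//	clnt, err := minio.New("play.min.io", &minio.Options{
//		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
//		Secure: true,
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}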
168
169// EndpointURL returns the URL of the S3 endpoint.
170func (c *Client) EndpointURL() *url.URL {
171 endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
172 return &endpoint
173}
174
175// lockedRandSource provides protected rand source, implements rand.Source interface.
176type lockedRandSource struct {
177 lk sync.Mutex
178 src rand.Source
179}
180
181// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
182func (r *lockedRandSource) Int63() (n int64) {
183 r.lk.Lock()
184 n = r.src.Int63()
185 r.lk.Unlock()
186 return
187}
188
189// Seed uses the provided seed value to initialize the generator to a
190// deterministic state.
191func (r *lockedRandSource) Seed(seed int64) {
192 r.lk.Lock()
193 r.src.Seed(seed)
194 r.lk.Unlock()
195}
196
197func privateNew(endpoint string, opts *Options) (*Client, error) {
198 // construct endpoint.
199 endpointURL, err := getEndpointURL(endpoint, opts.Secure)
200 if err != nil {
201 return nil, err
202 }
203
204	// Initialize cookies to preserve server-sent cookies, if any, and replay
205 // them upon each request.
206 jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
207 if err != nil {
208 return nil, err
209 }
210
211 // instantiate new Client.
212 clnt := new(Client)
213
214 // Save the credentials.
215 clnt.credsProvider = opts.Creds
216
217 // Remember whether we are using https or not
218 clnt.secure = opts.Secure
219
220 // Save endpoint URL, user agent for future uses.
221 clnt.endpointURL = endpointURL
222
223 transport := opts.Transport
224 if transport == nil {
225 transport, err = DefaultTransport(opts.Secure)
226 if err != nil {
227 return nil, err
228 }
229 }
230
231 clnt.httpTrace = opts.Trace
232
233 // Instantiate http client and bucket location cache.
234 clnt.httpClient = &http.Client{
235 Jar: jar,
236 Transport: transport,
237 CheckRedirect: func(req *http.Request, via []*http.Request) error {
238 return http.ErrUseLastResponse
239 },
240 }
241
242	// Set the custom region; if region is empty the bucket location cache is used automatically.
243 if opts.Region == "" {
244 if opts.CustomRegionViaURL != nil {
245 opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
246 } else {
247 opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
248 }
249 }
250 clnt.region = opts.Region
251
252 // Instantiate bucket location cache.
253 clnt.bucketLocCache = newBucketLocationCache()
254
255 // Introduce a new locked random seed.
256 clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
257
258 // Add default md5 hasher.
259 clnt.md5Hasher = opts.CustomMD5
260 clnt.sha256Hasher = opts.CustomSHA256
261 if clnt.md5Hasher == nil {
262 clnt.md5Hasher = newMd5Hasher
263 }
264 if clnt.sha256Hasher == nil {
265 clnt.sha256Hasher = newSHA256Hasher
266 }
267
268 clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()
269
270 // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
271 // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
272 clnt.lookup = opts.BucketLookup
273
274 // healthcheck is not initialized
275 clnt.healthStatus = unknown
276
277 // Return.
278 return clnt, nil
279}
280
281// SetAppInfo - add application details to user agent.
282func (c *Client) SetAppInfo(appName, appVersion string) {
283	// if the app name and version are not set, we do not set a new user agent.
284 if appName != "" && appVersion != "" {
285 c.appInfo.appName = appName
286 c.appInfo.appVersion = appVersion
287 }
288}
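// Editor's sketch: with appName "myCloudApp" and appVersion "1.0.0"
// (hypothetical values), subsequent requests send a User-Agent like
// "MinIO (linux; amd64) minio-go/v7.0.66 myCloudApp/1.0.0":
//
//	clnt.SetAppInfo("myCloudApp", "1.0.0")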
289
290// TraceOn - enable HTTP tracing.
291func (c *Client) TraceOn(outputStream io.Writer) {
292 // if outputStream is nil then default to os.Stdout.
293 if outputStream == nil {
294 outputStream = os.Stdout
295 }
296 // Sets a new output stream.
297 c.traceOutput = outputStream
298
299 // Enable tracing.
300 c.isTraceEnabled = true
301}
302
303// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
304func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
305 c.TraceOn(outputStream)
306 c.traceErrorsOnly = true
307}
308
309// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
310// If all tracing needs to be turned off, call TraceOff().
311func (c *Client) TraceErrorsOnlyOff() {
312 c.traceErrorsOnly = false
313}
314
315// TraceOff - disable HTTP tracing.
316func (c *Client) TraceOff() {
317 // Disable tracing.
318 c.isTraceEnabled = false
319 c.traceErrorsOnly = false
320}
321
322// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
323// requests. This feature is only specific to S3 for all other endpoints this
324// function does nothing. To read further details on s3 transfer acceleration
325// please visit -
326// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
327func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
328 if s3utils.IsAmazonEndpoint(*c.endpointURL) {
329 c.s3AccelerateEndpoint = accelerateEndpoint
330 }
331}
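// Editor's sketch, assuming the standard S3 accelerate host (the host value
// is an assumption, not taken from this repository); the call is a no-op for
// non-Amazon endpoints:
//
//	clnt.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")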
332
333// hashMaterials provides relevant initialized hash algo writers
334// based on the expected signature type.
335//
336// - For signature v4 request if the connection is insecure compute only sha256.
337// - For signature v4 request if the connection is secure compute only md5.
338// - For anonymous request compute md5.
339func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
340 hashSums = make(map[string][]byte)
341 hashAlgos = make(map[string]md5simd.Hasher)
342 if c.overrideSignerType.IsV4() {
343 if c.secure {
344 hashAlgos["md5"] = c.md5Hasher()
345 } else {
346 if isSha256Requested {
347 hashAlgos["sha256"] = c.sha256Hasher()
348 }
349 }
350 } else {
351 if c.overrideSignerType.IsAnonymous() {
352 hashAlgos["md5"] = c.md5Hasher()
353 }
354 }
355 if isMd5Requested {
356 hashAlgos["md5"] = c.md5Hasher()
357 }
358 return hashAlgos, hashSums
359}
360
361const (
362 unknown = -1
363 offline = 0
364 online = 1
365)
366
367// IsOnline returns true if healthcheck enabled and client is online.
368// If HealthCheck function has not been called this will always return true.
369func (c *Client) IsOnline() bool {
370 return !c.IsOffline()
371}
372
373// sets online healthStatus to offline
374func (c *Client) markOffline() {
375 atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
376}
377
378// IsOffline returns true if healthcheck enabled and client is offline
379// If HealthCheck function has not been called this will always return false.
380func (c *Client) IsOffline() bool {
381 return atomic.LoadInt32(&c.healthStatus) == offline
382}
383
384// HealthCheck starts a healthcheck to see if endpoint is up.
385// Returns a context cancellation function, to stop the health check,
386// and an error if health check is already started.
387func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
388 if atomic.LoadInt32(&c.healthStatus) != unknown {
389 return nil, fmt.Errorf("health check is running")
390 }
391 if hcDuration < 1*time.Second {
392 return nil, fmt.Errorf("health check duration should be at least 1 second")
393 }
394 probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
395 ctx, cancelFn := context.WithCancel(context.Background())
396 atomic.StoreInt32(&c.healthStatus, offline)
397 {
398 // Change to online, if we can connect.
399 gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
400 _, err := c.getBucketLocation(gctx, probeBucketName)
401 gcancel()
402 if !IsNetworkOrHostDown(err, false) {
403 switch ToErrorResponse(err).Code {
404 case "NoSuchBucket", "AccessDenied", "":
405 atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
406 }
407 }
408 }
409
410 go func(duration time.Duration) {
411 timer := time.NewTimer(duration)
412 defer timer.Stop()
413 for {
414 select {
415 case <-ctx.Done():
416 atomic.StoreInt32(&c.healthStatus, unknown)
417 return
418 case <-timer.C:
419 // Do health check the first time and ONLY if the connection is marked offline
420 if c.IsOffline() {
421 gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
422 _, err := c.getBucketLocation(gctx, probeBucketName)
423 gcancel()
424 if !IsNetworkOrHostDown(err, false) {
425 switch ToErrorResponse(err).Code {
426 case "NoSuchBucket", "AccessDenied", "":
427 atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
428 }
429 }
430 }
431
432 timer.Reset(duration)
433 }
434 }
435 }(hcDuration)
436 return cancelFn, nil
437}
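// Editor's sketch of driving the health check (the probe interval below is
// arbitrary):
//
//	cancel, err := clnt.HealthCheck(5 * time.Second)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer cancel() // stops the background probe
//	if clnt.IsOnline() {
//		// safe to issue requests
//	}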
438
439// requestMetadata - is container for all the values to make a request.
440type requestMetadata struct {
441 // If set newRequest presigns the URL.
442 presignURL bool
443
444 // User supplied.
445 bucketName string
446 objectName string
447 queryValues url.Values
448 customHeader http.Header
449 extraPresignHeader http.Header
450 expires int64
451
452 // Generated by our internal code.
453 bucketLocation string
454 contentBody io.Reader
455 contentLength int64
456 contentMD5Base64 string // carries base64 encoded md5sum
457 contentSHA256Hex string // carries hex encoded sha256sum
458 streamSha256 bool
459 addCrc bool
460 trailer http.Header // (http.Request).Trailer. Requires v4 signature.
461}
462
463// dumpHTTP - dump HTTP request and response.
464func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
465 // Starts http dump.
466 _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
467 if err != nil {
468 return err
469 }
470
471 // Filter out Signature field from Authorization header.
472 origAuth := req.Header.Get("Authorization")
473 if origAuth != "" {
474 req.Header.Set("Authorization", redactSignature(origAuth))
475 }
476
477 // Only display request header.
478 reqTrace, err := httputil.DumpRequestOut(req, false)
479 if err != nil {
480 return err
481 }
482
483 // Write request to trace output.
484 _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
485 if err != nil {
486 return err
487 }
488
489 // Only display response header.
490 var respTrace []byte
491
492 // For errors we make sure to dump response body as well.
493 if resp.StatusCode != http.StatusOK &&
494 resp.StatusCode != http.StatusPartialContent &&
495 resp.StatusCode != http.StatusNoContent {
496 respTrace, err = httputil.DumpResponse(resp, true)
497 if err != nil {
498 return err
499 }
500 } else {
501 respTrace, err = httputil.DumpResponse(resp, false)
502 if err != nil {
503 return err
504 }
505 }
506
507 // Write response to trace output.
508 _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
509 if err != nil {
510 return err
511 }
512
513 // Ends the http dump.
514 _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
515 if err != nil {
516 return err
517 }
518
519 // Returns success.
520 return nil
521}
522
523// do - execute http request.
524func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
525 defer func() {
526 if IsNetworkOrHostDown(err, false) {
527 c.markOffline()
528 }
529 }()
530
531 resp, err = c.httpClient.Do(req)
532 if err != nil {
533 // Handle this specifically for now until future Golang versions fix this issue properly.
534 if urlErr, ok := err.(*url.Error); ok {
535 if strings.Contains(urlErr.Err.Error(), "EOF") {
536 return nil, &url.Error{
537 Op: urlErr.Op,
538 URL: urlErr.URL,
539 Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
540 }
541 }
542 }
543 return nil, err
544 }
545
546	// Response cannot be nil, report an error if that's the case.
547 if resp == nil {
548 msg := "Response is empty. " + reportIssue
549 return nil, errInvalidArgument(msg)
550 }
551
552 // If trace is enabled, dump http request and response,
553	// except when traceErrorsOnly is enabled and the response's status code is OK
554 if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
555 err = c.dumpHTTP(req, resp)
556 if err != nil {
557 return nil, err
558 }
559 }
560
561 return resp, nil
562}
563
564// List of success statuses.
565var successStatus = []int{
566 http.StatusOK,
567 http.StatusNoContent,
568 http.StatusPartialContent,
569}
570
571// executeMethod - instantiates a given method and retries the
572// request upon any error, up to maxRetries attempts, in a binomially
573// delayed manner using a standard backoff algorithm.
574func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
575 if c.IsOffline() {
576 return nil, errors.New(c.endpointURL.String() + " is offline.")
577 }
578
579 var retryable bool // Indicates if request can be retried.
580 var bodySeeker io.Seeker // Extracted seeker from io.Reader.
581 reqRetry := MaxRetry // Indicates how many times we can retry the request
582
583 if metadata.contentBody != nil {
584		// If the body is seekable then the request is retryable.
585 bodySeeker, retryable = metadata.contentBody.(io.Seeker)
586 switch bodySeeker {
587 case os.Stdin, os.Stdout, os.Stderr:
588 retryable = false
589 }
590 // Retry only when reader is seekable
591 if !retryable {
592 reqRetry = 1
593 }
594
595 // Figure out if the body can be closed - if yes
596 // we will definitely close it upon the function
597 // return.
598 bodyCloser, ok := metadata.contentBody.(io.Closer)
599 if ok {
600 defer bodyCloser.Close()
601 }
602 }
603
604 // Create cancel context to control 'newRetryTimer' go routine.
605 retryCtx, cancel := context.WithCancel(ctx)
606
607 // Indicate to our routine to exit cleanly upon return.
608 defer cancel()
609
610 for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
611 // Retry executes the following function body if request has an
612 // error until maxRetries have been exhausted, retry attempts are
613 // performed after waiting for a given period of time in a
614 // binomial fashion.
615 if retryable {
616 // Seek back to beginning for each attempt.
617 if _, err = bodySeeker.Seek(0, 0); err != nil {
618 // If seek failed, no need to retry.
619 return nil, err
620 }
621 }
622
623 if metadata.addCrc {
624 if metadata.trailer == nil {
625 metadata.trailer = make(http.Header, 1)
626 }
627 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
628 metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
629 // Update trailer when done.
630 metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
631 })
632 metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
633 }
634 // Instantiate a new request.
635 var req *http.Request
636 req, err = c.newRequest(ctx, method, metadata)
637 if err != nil {
638 errResponse := ToErrorResponse(err)
639 if isS3CodeRetryable(errResponse.Code) {
640 continue // Retry.
641 }
642
643 return nil, err
644 }
645
646 // Initiate the request.
647 res, err = c.do(req)
648 if err != nil {
649 if isRequestErrorRetryable(err) {
650 // Retry the request
651 continue
652 }
653 return nil, err
654 }
655
656 // For any known successful http status, return quickly.
657 for _, httpStatus := range successStatus {
658 if httpStatus == res.StatusCode {
659 return res, nil
660 }
661 }
662
663 // Read the body to be saved later.
664 errBodyBytes, err := io.ReadAll(res.Body)
665 // res.Body should be closed
666 closeResponse(res)
667 if err != nil {
668 return nil, err
669 }
670
671 // Save the body.
672 errBodySeeker := bytes.NewReader(errBodyBytes)
673 res.Body = io.NopCloser(errBodySeeker)
674
675		// For errors, verify if it's retryable, otherwise fail quickly.
676 errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
677
678 // Save the body back again.
679 errBodySeeker.Seek(0, 0) // Seek back to starting point.
680 res.Body = io.NopCloser(errBodySeeker)
681
682		// If the bucket region is set in the error response and the error
683		// code indicates an invalid region, we can retry the request
684		// with the new region.
685		//
686		// Additionally, we should only retry if bucketLocation and the custom
687		// region are empty.
688 if c.region == "" {
689 switch errResponse.Code {
690 case "AuthorizationHeaderMalformed":
691 fallthrough
692 case "InvalidRegion":
693 fallthrough
694 case "AccessDenied":
695 if errResponse.Region == "" {
696 // Region is empty we simply return the error.
697 return res, err
698 }
699 // Region is not empty figure out a way to
700 // handle this appropriately.
701 if metadata.bucketName != "" {
702 // Gather Cached location only if bucketName is present.
703 if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region {
704 c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
705 continue // Retry.
706 }
707 } else {
708 // This is for ListBuckets() fallback.
709 if errResponse.Region != metadata.bucketLocation {
710 // Retry if the error response has a different region
711 // than the request we just made.
712 metadata.bucketLocation = errResponse.Region
713 continue // Retry
714 }
715 }
716 }
717 }
718
719 // Verify if error response code is retryable.
720 if isS3CodeRetryable(errResponse.Code) {
721 continue // Retry.
722 }
723
724 // Verify if http status code is retryable.
725 if isHTTPStatusRetryable(res.StatusCode) {
726 continue // Retry.
727 }
728
729 // For all other cases break out of the retry loop.
730 break
731 }
732
733	// Return an error when the retry is canceled or its deadline is exceeded.
734 if e := retryCtx.Err(); e != nil {
735 return nil, e
736 }
737
738 return res, err
739}
740
741// newRequest - instantiate a new HTTP request for a given method.
742func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
743 // If no method is supplied default to 'POST'.
744 if method == "" {
745 method = http.MethodPost
746 }
747
748 location := metadata.bucketLocation
749 if location == "" {
750 if metadata.bucketName != "" {
751 // Gather location only if bucketName is present.
752 location, err = c.getBucketLocation(ctx, metadata.bucketName)
753 if err != nil {
754 return nil, err
755 }
756 }
757 if location == "" {
758 location = getDefaultLocation(*c.endpointURL, c.region)
759 }
760 }
761
762	// Check if the target url supports virtual-host-style requests.
763	// We explicitly disallow MakeBucket calls from using virtual DNS style,
764	// since the resolution may fail.
765 isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0)
766 isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket
767
768 // Construct a new target URL.
769 targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
770 isVirtualHost, metadata.queryValues)
771 if err != nil {
772 return nil, err
773 }
774
775 if c.httpTrace != nil {
776 ctx = httptrace.WithClientTrace(ctx, c.httpTrace)
777 }
778
779 // Initialize a new HTTP request for the method.
780 req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
781 if err != nil {
782 return nil, err
783 }
784
785 // Get credentials from the configured credentials provider.
786 value, err := c.credsProvider.Get()
787 if err != nil {
788 return nil, err
789 }
790
791 var (
792 signerType = value.SignerType
793 accessKeyID = value.AccessKeyID
794 secretAccessKey = value.SecretAccessKey
795 sessionToken = value.SessionToken
796 )
797
798 // Custom signer set then override the behavior.
799 if c.overrideSignerType != credentials.SignatureDefault {
800 signerType = c.overrideSignerType
801 }
802
803 // If signerType returned by credentials helper is anonymous,
804 // then do not sign regardless of signerType override.
805 if value.SignerType == credentials.SignatureAnonymous {
806 signerType = credentials.SignatureAnonymous
807 }
808
809 // Generate presign url if needed, return right here.
810 if metadata.expires != 0 && metadata.presignURL {
811 if signerType.IsAnonymous() {
812 return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
813 }
814 if metadata.extraPresignHeader != nil {
815 if signerType.IsV2() {
816 return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.")
817 }
818 for k, v := range metadata.extraPresignHeader {
819 req.Header.Set(k, v[0])
820 }
821 }
822 if signerType.IsV2() {
823 // Presign URL with signature v2.
824 req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
825 } else if signerType.IsV4() {
826 // Presign URL with signature v4.
827 req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
828 }
829 return req, nil
830 }
831
832 // Set 'User-Agent' header for the request.
833 c.setUserAgent(req)
834
835 // Set all headers.
836 for k, v := range metadata.customHeader {
837 req.Header.Set(k, v[0])
838 }
839
840 // Go net/http notoriously closes the request body.
841 // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
842 // This can cause underlying *os.File seekers to fail, avoid that
843 // by making sure to wrap the closer as a nop.
844 if metadata.contentLength == 0 {
845 req.Body = nil
846 } else {
847 req.Body = io.NopCloser(metadata.contentBody)
848 }
849
850 // Set incoming content-length.
851 req.ContentLength = metadata.contentLength
852 if req.ContentLength <= -1 {
853 // For unknown content length, we upload using transfer-encoding: chunked.
854 req.TransferEncoding = []string{"chunked"}
855 }
856
857 // set md5Sum for content protection.
858 if len(metadata.contentMD5Base64) > 0 {
859 req.Header.Set("Content-Md5", metadata.contentMD5Base64)
860 }
861
862 // For anonymous requests just return.
863 if signerType.IsAnonymous() {
864 return req, nil
865 }
866
867 switch {
868 case signerType.IsV2():
869 // Add signature version '2' authorization header.
870 req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
871 case metadata.streamSha256 && !c.secure:
872 if len(metadata.trailer) > 0 {
873 req.Trailer = metadata.trailer
874 }
875		// Streaming signature is used by default for a PUT object request.
876		// Additionally, we check whether the initialized client is secure;
877		// if so, we don't need to perform streaming signing.
878 req = signer.StreamingSignV4(req, accessKeyID,
879 secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
880 default:
881 // Set sha256 sum for signature calculation only with signature version '4'.
882 shaHeader := unsignedPayload
883 if metadata.contentSHA256Hex != "" {
884 shaHeader = metadata.contentSHA256Hex
885 if len(metadata.trailer) > 0 {
886 // Sanity check, we should not end up here if upstream is sane.
887 return nil, errors.New("internal error: contentSHA256Hex with trailer not supported")
888 }
889 } else if len(metadata.trailer) > 0 {
890 shaHeader = unsignedPayloadTrailer
891 }
892 req.Header.Set("X-Amz-Content-Sha256", shaHeader)
893
894 // Add signature version '4' authorization header.
895 req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer)
896 }
897
898 // Return request.
899 return req, nil
900}
901
902// set User agent.
903func (c *Client) setUserAgent(req *http.Request) {
904 req.Header.Set("User-Agent", libraryUserAgent)
905 if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
906 req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
907 }
908}
909
910// makeTargetURL makes a new target url.
911func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
912 host := c.endpointURL.Host
913 // For Amazon S3 endpoint, try to fetch location based endpoint.
914 if s3utils.IsAmazonEndpoint(*c.endpointURL) {
915 if c.s3AccelerateEndpoint != "" && bucketName != "" {
916 // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
917 // Disable transfer acceleration for non-compliant bucket names.
918 if strings.Contains(bucketName, ".") {
919 return nil, errTransferAccelerationBucket(bucketName)
920 }
921 // If transfer acceleration is requested set new host.
922 // For more details about enabling transfer acceleration read here.
923 // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
924 host = c.s3AccelerateEndpoint
925 } else {
926 // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
927 if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
928 // Fetch new host based on the bucket location.
929 host = getS3Endpoint(bucketLocation)
930 }
931 }
932 }
933
934 // Save scheme.
935 scheme := c.endpointURL.Scheme
936
937	// Strip port 80 and 443 so we won't send these ports in the Host header.
938	// The reason is that browsers and curl automatically remove :80 and :443
939	// from generated presigned urls, which then causes a signature mismatch error.
940 if h, p, err := net.SplitHostPort(host); err == nil {
941 if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
942 host = h
943 if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
944 host = "[" + h + "]"
945 }
946 }
947 }
948
949 urlStr := scheme + "://" + host + "/"
950
951 // Make URL only if bucketName is available, otherwise use the
952 // endpoint URL.
953 if bucketName != "" {
954 // If endpoint supports virtual host style use that always.
955 // Currently only S3 and Google Cloud Storage would support
956 // virtual host style.
957 if isVirtualHostStyle {
958 urlStr = scheme + "://" + bucketName + "." + host + "/"
959 if objectName != "" {
960 urlStr += s3utils.EncodePath(objectName)
961 }
962 } else {
963 // If not fall back to using path style.
964 urlStr = urlStr + bucketName + "/"
965 if objectName != "" {
966 urlStr += s3utils.EncodePath(objectName)
967 }
968 }
969 }
970
971 // If there are any query values, add them to the end.
972 if len(queryValues) > 0 {
973 urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
974 }
975
976 return url.Parse(urlStr)
977}
978
979// returns true if virtual hosted style requests are to be used.
980func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
981 if bucketName == "" {
982 return false
983 }
984
985 if c.lookup == BucketLookupDNS {
986 return true
987 }
988 if c.lookup == BucketLookupPath {
989 return false
990 }
991
992 // default to virtual only for Amazon/Google storage. In all other cases use
993 // path style requests
994 return s3utils.IsVirtualHostSupported(url, bucketName)
995}
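To make the two lookup styles concrete: for bucket "mybucket" and object "a.txt" against a generic endpoint https://example.com, makeTargetURL produces (editor's illustration, not output taken from this repository):

	virtual-host style: https://mybucket.example.com/a.txt
	path style:         https://example.com/mybucket/a.txt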
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
new file mode 100644
index 0000000..b1d3b38
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -0,0 +1,256 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "net"
23 "net/http"
24 "net/url"
25 "path"
26 "sync"
27
28 "github.com/minio/minio-go/v7/pkg/credentials"
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30 "github.com/minio/minio-go/v7/pkg/signer"
31)
32
33// bucketLocationCache - Provides simple mechanism to hold bucket
34// locations in memory.
35type bucketLocationCache struct {
36 // mutex is used for handling the concurrent
37 // read/write requests for cache.
38 sync.RWMutex
39
40 // items holds the cached bucket locations.
41 items map[string]string
42}
43
44// newBucketLocationCache - Provides a new bucket location cache to be
45// used internally with the client object.
46func newBucketLocationCache() *bucketLocationCache {
47 return &bucketLocationCache{
48 items: make(map[string]string),
49 }
50}
51
52// Get - Returns a value of a given key if it exists.
53func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
54 r.RLock()
55 defer r.RUnlock()
56 location, ok = r.items[bucketName]
57 return
58}
59
60// Set - Will persist a value into cache.
61func (r *bucketLocationCache) Set(bucketName, location string) {
62 r.Lock()
63 defer r.Unlock()
64 r.items[bucketName] = location
65}
66
67// Delete - Deletes a bucket name from cache.
68func (r *bucketLocationCache) Delete(bucketName string) {
69 r.Lock()
70 defer r.Unlock()
71 delete(r.items, bucketName)
72}
73
74// GetBucketLocation - get location for the bucket name from the location cache;
75// if not cached, fetch freshly by making a new request.
76func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
77 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
78 return "", err
79 }
80 return c.getBucketLocation(ctx, bucketName)
81}
82
83// getBucketLocation - Get location for the bucketName from the location map cache;
84// if not cached, fetch freshly by making a new request.
85func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
86 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
87 return "", err
88 }
89
90 // Region set then no need to fetch bucket location.
91 if c.region != "" {
92 return c.region, nil
93 }
94
95 if location, ok := c.bucketLocCache.Get(bucketName); ok {
96 return location, nil
97 }
98
99 // Initialize a new request.
100 req, err := c.getBucketLocationRequest(ctx, bucketName)
101 if err != nil {
102 return "", err
103 }
104
105 // Initiate the request.
106 resp, err := c.do(req)
107 defer closeResponse(resp)
108 if err != nil {
109 return "", err
110 }
111 location, err := processBucketLocationResponse(resp, bucketName)
112 if err != nil {
113 return "", err
114 }
115 c.bucketLocCache.Set(bucketName, location)
116 return location, nil
117}
118
119// processes the getBucketLocation http response from the server.
120func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
121 if resp != nil {
122 if resp.StatusCode != http.StatusOK {
123 err = httpRespToErrorResponse(resp, bucketName, "")
124 errResp := ToErrorResponse(err)
125 // For access denied error, it could be an anonymous
126 // request. Move forward and let the top level callers
127 // succeed if possible based on their policy.
128 switch errResp.Code {
129 case "NotImplemented":
130 switch errResp.Server {
131 case "AmazonSnowball":
132 return "snowball", nil
133 case "cloudflare":
134 return "us-east-1", nil
135 }
136 case "AuthorizationHeaderMalformed":
137 fallthrough
138 case "InvalidRegion":
139 fallthrough
140 case "AccessDenied":
141 if errResp.Region == "" {
142 return "us-east-1", nil
143 }
144 return errResp.Region, nil
145 }
146 return "", err
147 }
148 }
149
150 // Extract location.
151 var locationConstraint string
152 err = xmlDecoder(resp.Body, &locationConstraint)
153 if err != nil {
154 return "", err
155 }
156
157 location := locationConstraint
158	// If location is empty it will be 'us-east-1'.
159 if location == "" {
160 location = "us-east-1"
161 }
162
163 // Location can be 'EU' convert it to meaningful 'eu-west-1'.
164 if location == "EU" {
165 location = "eu-west-1"
166 }
167
168	// The location is cached by the caller (getBucketLocation).
169
170 // Return.
171 return location, nil
172}
173
174// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
175func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
176 // Set location query.
177 urlValues := make(url.Values)
178 urlValues.Set("location", "")
179
180 // Set get bucket location always as path style.
181 targetURL := *c.endpointURL
182
183	// Strip standard ports, as done in the makeTargetURL method in api.go.
184 if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
185 if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
186 targetURL.Host = h
187			if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
188 targetURL.Host = "[" + h + "]"
189 }
190 }
191 }
192
193 isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName)
194
195 var urlStr string
196
197 if isVirtualStyle {
198 urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
199 } else {
200 targetURL.Path = path.Join(bucketName, "") + "/"
201 targetURL.RawQuery = urlValues.Encode()
202 urlStr = targetURL.String()
203 }
204
205 // Get a new HTTP request for the method.
206 req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
207 if err != nil {
208 return nil, err
209 }
210
211 // Set UserAgent for the request.
212 c.setUserAgent(req)
213
214 // Get credentials from the configured credentials provider.
215 value, err := c.credsProvider.Get()
216 if err != nil {
217 return nil, err
218 }
219
220 var (
221 signerType = value.SignerType
222 accessKeyID = value.AccessKeyID
223 secretAccessKey = value.SecretAccessKey
224 sessionToken = value.SessionToken
225 )
226
227 // Custom signer set then override the behavior.
228 if c.overrideSignerType != credentials.SignatureDefault {
229 signerType = c.overrideSignerType
230 }
231
232 // If signerType returned by credentials helper is anonymous,
233 // then do not sign regardless of signerType override.
234 if value.SignerType == credentials.SignatureAnonymous {
235 signerType = credentials.SignatureAnonymous
236 }
237
238 if signerType.IsAnonymous() {
239 return req, nil
240 }
241
242 if signerType.IsV2() {
243 req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle)
244 return req, nil
245 }
246
247 // Set sha256 sum for signature calculation only with signature version '4'.
248 contentSha256 := emptySHA256Hex
249 if c.secure {
250 contentSha256 = unsignedPayload
251 }
252
253 req.Header.Set("X-Amz-Content-Sha256", contentSha256)
254 req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
255 return req, nil
256}
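A minimal usage sketch (editor's addition; the client clnt and bucket name are placeholders):

	loc, err := clnt.GetBucketLocation(context.Background(), "mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(loc) // e.g. "us-east-1"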
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
new file mode 100644
index 0000000..a1f6f43
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -0,0 +1,210 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "crypto/sha1"
22 "crypto/sha256"
23 "encoding/base64"
24 "hash"
25 "hash/crc32"
26 "io"
27 "math/bits"
28)
29
30// ChecksumType contains information about the checksum type.
31type ChecksumType uint32
32
33const (
34
35 // ChecksumSHA256 indicates a SHA256 checksum.
36 ChecksumSHA256 ChecksumType = 1 << iota
37 // ChecksumSHA1 indicates a SHA-1 checksum.
38 ChecksumSHA1
39 // ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
40 ChecksumCRC32
41 // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
42 ChecksumCRC32C
43
44 // Keep after all valid checksums
45 checksumLast
46
47 // checksumMask is a mask for valid checksum types.
48 checksumMask = checksumLast - 1
49
50 // ChecksumNone indicates no checksum.
51 ChecksumNone ChecksumType = 0
52
53 amzChecksumAlgo = "x-amz-checksum-algorithm"
54 amzChecksumCRC32 = "x-amz-checksum-crc32"
55 amzChecksumCRC32C = "x-amz-checksum-crc32c"
56 amzChecksumSHA1 = "x-amz-checksum-sha1"
57 amzChecksumSHA256 = "x-amz-checksum-sha256"
58)
59
60// Is reports whether c contains all of t.
61func (c ChecksumType) Is(t ChecksumType) bool {
62 return c&t == t
63}
64
65// Key returns the header key.
66// returns empty string if invalid or none.
67func (c ChecksumType) Key() string {
68 switch c & checksumMask {
69 case ChecksumCRC32:
70 return amzChecksumCRC32
71 case ChecksumCRC32C:
72 return amzChecksumCRC32C
73 case ChecksumSHA1:
74 return amzChecksumSHA1
75 case ChecksumSHA256:
76 return amzChecksumSHA256
77 }
78 return ""
79}
80
81// RawByteLen returns the size of the un-encoded checksum.
82func (c ChecksumType) RawByteLen() int {
83 switch c & checksumMask {
84 case ChecksumCRC32, ChecksumCRC32C:
85 return 4
86 case ChecksumSHA1:
87 return sha1.Size
88 case ChecksumSHA256:
89 return sha256.Size
90 }
91 return 0
92}
93
94// Hasher returns a hasher corresponding to the checksum type.
95// Returns nil if no checksum.
96func (c ChecksumType) Hasher() hash.Hash {
97 switch c & checksumMask {
98 case ChecksumCRC32:
99 return crc32.NewIEEE()
100 case ChecksumCRC32C:
101 return crc32.New(crc32.MakeTable(crc32.Castagnoli))
102 case ChecksumSHA1:
103 return sha1.New()
104 case ChecksumSHA256:
105 return sha256.New()
106 }
107 return nil
108}
109
110// IsSet returns whether the type is valid and known.
111func (c ChecksumType) IsSet() bool {
112 return bits.OnesCount32(uint32(c)) == 1
113}
114
115// String returns the type as a string.
116// CRC32, CRC32C, SHA1, and SHA256 for valid values.
117// Empty string for unset and "<invalid>" if not valid.
118func (c ChecksumType) String() string {
119 switch c & checksumMask {
120 case ChecksumCRC32:
121 return "CRC32"
122 case ChecksumCRC32C:
123 return "CRC32C"
124 case ChecksumSHA1:
125 return "SHA1"
126 case ChecksumSHA256:
127 return "SHA256"
128 case ChecksumNone:
129 return ""
130 }
131 return "<invalid>"
132}
133
134// ChecksumReader reads all of r and returns a checksum of type c.
135// Returns any error that may have occurred while reading.
136func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) {
137 h := c.Hasher()
138 if h == nil {
139 return Checksum{}, nil
140 }
141 _, err := io.Copy(h, r)
142 if err != nil {
143 return Checksum{}, err
144 }
145 return NewChecksum(c, h.Sum(nil)), nil
146}
147
148// ChecksumBytes returns a checksum of the content b with type c.
149func (c ChecksumType) ChecksumBytes(b []byte) Checksum {
150 h := c.Hasher()
151 if h == nil {
152 return Checksum{}
153 }
154 n, err := h.Write(b)
155 if err != nil || n != len(b) {
156 // Shouldn't happen with these checksummers.
157 return Checksum{}
158 }
159 return NewChecksum(c, h.Sum(nil))
160}
161
162// Checksum is a type and encoded value.
163type Checksum struct {
164 Type ChecksumType
165 r []byte
166}
167
168// NewChecksum sets the checksum to the value of b,
169// which is the raw hash output.
170// If the length of b does not match t.RawByteLen,
171// a checksum with ChecksumNone is returned.
172func NewChecksum(t ChecksumType, b []byte) Checksum {
173 if t.IsSet() && len(b) == t.RawByteLen() {
174 return Checksum{Type: t, r: b}
175 }
176 return Checksum{}
177}
178
179// NewChecksumString sets the checksum to the value of s,
180// which is the base 64 encoded raw hash output.
181// If the length of the decoded value does not match t.RawByteLen, it is not added.
182func NewChecksumString(t ChecksumType, s string) Checksum {
183 b, _ := base64.StdEncoding.DecodeString(s)
184 if t.IsSet() && len(b) == t.RawByteLen() {
185 return Checksum{Type: t, r: b}
186 }
187 return Checksum{}
188}
189
190// IsSet returns whether the checksum is valid and known.
191func (c Checksum) IsSet() bool {
192 return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen()
193}
194
195// Encoded returns the encoded value.
196// Returns the empty string if not set or not valid.
197func (c Checksum) Encoded() string {
198 if !c.IsSet() {
199 return ""
200 }
201 return base64.StdEncoding.EncodeToString(c.r)
202}
203
204// Raw returns the raw checksum value if set.
205func (c Checksum) Raw() []byte {
206 if !c.IsSet() {
207 return nil
208 }
209 return c.r
210}
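A short sketch of how these types compose (editor's addition):

	c := minio.ChecksumCRC32C.ChecksumBytes([]byte("hello world"))
	fmt.Println(c.Type.String()) // "CRC32C"
	fmt.Println(c.Type.Key())    // "x-amz-checksum-crc32c"
	fmt.Println(c.Encoded())     // base64 of the 4 raw CRC-32C bytes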
diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
new file mode 100644
index 0000000..cb232c3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
@@ -0,0 +1,80 @@
1# Contributor Covenant Code of Conduct
2
3## Our Pledge
4
5In the interest of fostering an open and welcoming environment, we as
6contributors and maintainers pledge to making participation in our project and
7our community a harassment-free experience for everyone, regardless of age, body
8size, disability, ethnicity, gender identity and expression, level of experience,
9nationality, personal appearance, race, religion, or sexual identity and
10orientation.
11
12## Our Standards
13
14Examples of behavior that contributes to creating a positive environment
15include:
16
17* Using welcoming and inclusive language
18* Being respectful of differing viewpoints and experiences
19* Gracefully accepting constructive criticism
20* Focusing on what is best for the community
21* Showing empathy towards other community members
22
23Examples of unacceptable behavior by participants include:
24
25* The use of sexualized language or imagery and unwelcome sexual attention or
26 advances
27* Trolling, insulting/derogatory comments, and personal or political attacks
28* Public or private harassment
29* Publishing others' private information, such as a physical or electronic
30 address, without explicit permission
31* Other conduct which could reasonably be considered inappropriate in a
32 professional setting
33
34## Our Responsibilities
35
36Project maintainers are responsible for clarifying the standards of acceptable
37behavior and are expected to take appropriate and fair corrective action in
38response to any instances of unacceptable behavior, in compliance with the
39licensing terms applying to the Project developments.
40
41Project maintainers have the right and responsibility to remove, edit, or
42reject comments, commits, code, wiki edits, issues, and other contributions
43that are not aligned to this Code of Conduct, or to ban temporarily or
44permanently any contributor for other behaviors that they deem inappropriate,
45threatening, offensive, or harmful. However, these actions shall respect the
46licensing terms of the Project Developments that will always supersede such
47Code of Conduct.
48
49## Scope
50
51This Code of Conduct applies both within project spaces and in public spaces
52when an individual is representing the project or its community. Examples of
53representing a project or community include using an official project e-mail
54address, posting via an official social media account, or acting as an appointed
55representative at an online or offline event. Representation of a project may be
56further defined and clarified by project maintainers.
57
58## Enforcement
59
60Instances of abusive, harassing, or otherwise unacceptable behavior may be
61reported by contacting the project team at [email protected]. The project team
62will review and investigate all complaints, and will respond in a way that it deems
63appropriate to the circumstances. The project team is obligated to maintain
64confidentiality with regard to the reporter of an incident.
65Further details of specific enforcement policies may be posted separately.
66
67Project maintainers who do not follow or enforce the Code of Conduct in good
68faith may face temporary or permanent repercussions as determined by other
69members of the project's leadership.
70
71## Attribution
72
73This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
74available at [http://contributor-covenant.org/version/1/4][version]
75
76This version includes a clarification to ensure that the code of conduct is in
77compliance with the free software licensing terms of the project.
78
79[homepage]: http://contributor-covenant.org
80[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go
new file mode 100644
index 0000000..401d2a7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/constants.go
@@ -0,0 +1,110 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20// Multipart upload defaults.
21
22// absMinPartSize - absolute minimum part size (5 MiB) below which
23// a part in a multipart upload may not be uploaded.
24const absMinPartSize = 1024 * 1024 * 5
25
26// minPartSize - minimum part size 16MiB per object after which
27// putObject behaves internally as multipart.
28const minPartSize = 1024 * 1024 * 16
29
30// maxPartsCount - maximum number of parts for a single multipart session.
31const maxPartsCount = 10000
32
33// maxPartSize - maximum part size 5GiB for a single multipart upload
34// operation.
35const maxPartSize = 1024 * 1024 * 1024 * 5
36
37// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
38// operation.
39const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
40
41// maxMultipartPutObjectSize - maximum size 5TiB of object for
42// Multipart operation.
43const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
44
45// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
46// we don't want to sign the request payload
47const unsignedPayload = "UNSIGNED-PAYLOAD"
48
49// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when
50// we don't want to sign the request payload, but have a trailer.
51const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
52
53// Total number of parallel workers used for multipart operation.
54const totalWorkers = 4
55
56// Signature related constants.
57const (
58 signV4Algorithm = "AWS4-HMAC-SHA256"
59 iso8601DateFormat = "20060102T150405Z"
60)
61
62const (
63 // Storage class header.
64 amzStorageClass = "X-Amz-Storage-Class"
65
66 // Website redirect location header
67 amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
68
69 // Object Tagging headers
70 amzTaggingHeader = "X-Amz-Tagging"
71 amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
72
73 amzVersionID = "X-Amz-Version-Id"
74 amzTaggingCount = "X-Amz-Tagging-Count"
75 amzExpiration = "X-Amz-Expiration"
76 amzRestore = "X-Amz-Restore"
77 amzReplicationStatus = "X-Amz-Replication-Status"
78 amzDeleteMarker = "X-Amz-Delete-Marker"
79
80 // Object legal hold header
81 amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
82
83 // Object retention header
84 amzLockMode = "X-Amz-Object-Lock-Mode"
85 amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date"
86 amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
87
88 // Replication status
89 amzBucketReplicationStatus = "X-Amz-Replication-Status"
90 // Minio specific Replication/lifecycle transition extension
91 minIOBucketSourceMTime = "X-Minio-Source-Mtime"
92
93 minIOBucketSourceETag = "X-Minio-Source-Etag"
94 minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
95 minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
96 minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
97 minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check"
98
99 // Header indicates last tag update time on source
100 minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
101 // Header indicates last retention update time on source
102 minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
103 // Header indicates last legalhold update time on source
104 minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
105 minIOForceDelete = "x-minio-force-delete"
106 // Header indicates delete marker replication request can be sent by source now.
107 minioTgtReplicationReady = "X-Minio-Replication-Ready"
108 // Header asks if delete marker replication request can be sent by source now.
109 isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready"
110)
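Editor's note on how these limits interact: 10000 parts of at most 5 GiB each would allow roughly 48.8 TiB, but maxMultipartPutObjectSize caps objects at 5 TiB; conversely, uploading a full 5 TiB object requires an average part size of at least 5 TiB / 10000 ≈ 525 MiB, well above the 16 MiB minPartSize at which putObject switches to multipart.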
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
new file mode 100644
index 0000000..132ea70
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -0,0 +1,150 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "net/http"
24
25 "github.com/minio/minio-go/v7/pkg/encrypt"
26)
27
28// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
29type Core struct {
30 *Client
31}
32
33// NewCore - Returns a new initialized Core client; this Core client should
34// only be used under special conditions such as needing to access lower-level
35// primitives and being able to use them to write your own wrappers.
36func NewCore(endpoint string, opts *Options) (*Core, error) {
37 var s3Client Core
38 client, err := New(endpoint, opts)
39 if err != nil {
40 return nil, err
41 }
42 s3Client.Client = client
43 return &s3Client, nil
44}
45
46// ListObjects - List all the objects at a prefix, optionally with marker and delimiter
47// you can further filter the results.
48func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
49 return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
50}
51
52// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
53// continuationToken instead of marker to support iteration over the results.
54func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
55 return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
56}
57
58// CopyObject - copies an object from source object to destination object on server side.
59func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
60 return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts)
61}
62
63// CopyObjectPart - creates a part in a multipart upload by copying (a
64// part of) an existing object.
65func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
66 partID int, startOffset, length int64, metadata map[string]string,
67) (p CompletePart, err error) {
68 return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
69 partID, startOffset, length, metadata)
70}
71
72// PutObject - Upload object. Uploads using single PUT call.
73func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
74 hookReader := newHook(data, opts.Progress)
75 return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
76}
77
78// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
79func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
80 result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
81 return result.UploadID, err
82}
83
84// ListMultipartUploads - List incomplete uploads.
85func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
86 return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
87}
88
89// PutObjectPartOptions contains options for PutObjectPart API
90type PutObjectPartOptions struct {
91 Md5Base64, Sha256Hex string
92 SSE encrypt.ServerSide
93 CustomHeader, Trailer http.Header
94}
95
96// PutObjectPart - Upload an object part.
97func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int,
98 data io.Reader, size int64, opts PutObjectPartOptions,
99) (ObjectPart, error) {
100 p := uploadPartParams{
101 bucketName: bucket,
102 objectName: object,
103 uploadID: uploadID,
104 reader: data,
105 partNumber: partID,
106 md5Base64: opts.Md5Base64,
107 sha256Hex: opts.Sha256Hex,
108 size: size,
109 sse: opts.SSE,
110 streamSha256: true,
111 customHeader: opts.CustomHeader,
112 trailer: opts.Trailer,
113 }
114 return c.uploadPart(ctx, p)
115}
116
117// ListObjectParts - List uploaded parts of an incomplete upload.
118func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) {
119 return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
120}
121
122// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
123func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) {
124 res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
125 Parts: parts,
126 }, opts)
127 return res, err
128}
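// Multipart flow sketch (editorial note): the low-level calls above compose
// as initiate -> upload parts -> complete. Names, ctx and data below are
// hypothetical; every part except the last must meet the S3 minimum part
// size of 5 MiB.
//
//	uploadID, err := core.NewMultipartUpload(ctx, "mybucket", "myobject", minio.PutObjectOptions{})
//	// ... handle err ...
//	part, err := core.PutObjectPart(ctx, "mybucket", "myobject", uploadID, 1,
//		bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{})
//	// ... handle err ...
//	_, err = core.CompleteMultipartUpload(ctx, "mybucket", "myobject", uploadID,
//		[]minio.CompletePart{{PartNumber: part.PartNumber, ETag: part.ETag}},
//		minio.PutObjectOptions{})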
129
130// AbortMultipartUpload - Abort an incomplete upload.
131func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
132 return c.abortMultipartUpload(ctx, bucket, object, uploadID)
133}
134
135// GetBucketPolicy - fetches bucket access policy for a given bucket.
136func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
137 return c.getBucketPolicy(ctx, bucket)
138}
139
140// PutBucketPolicy - applies a new bucket access policy for a given bucket.
141func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error {
142 return c.putBucketPolicy(ctx, bucket, bucketPolicy)
143}
144
145// GetObject is a lower-level API implemented to support reading
146// partial objects, and also downloading objects with special conditions
147// matching ETag, modtime, etc.
148func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
149 return c.getObject(ctx, bucketName, objectName, opts)
150}
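// Range-read sketch (editorial note): because GetObject returns the raw
// body, the parsed ObjectInfo and the response headers, partial reads are
// straightforward. SetRange is a method on GetObjectOptions; the offsets
// here are arbitrary.
//
//	opts := minio.GetObjectOptions{}
//	_ = opts.SetRange(0, 1023) // request the first 1 KiB
//	body, info, _, err := core.GetObject(ctx, "mybucket", "myobject", opts)
//	if err != nil {
//		return err
//	}
//	defer body.Close()
//	fmt.Println(info.ETag) // metadata parsed from the response headers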
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
new file mode 100644
index 0000000..f951cd0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -0,0 +1,13004 @@
1//go:build mint
2// +build mint
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2015-2020 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package main
22
23import (
24 "archive/zip"
25 "bytes"
26 "context"
27 "crypto/sha1"
28 "encoding/base64"
29 "errors"
30 "fmt"
31 "hash"
32 "hash/crc32"
33 "io"
34 "math/rand"
35 "mime/multipart"
36 "net/http"
37 "net/url"
38 "os"
39 "path"
40 "path/filepath"
41 "reflect"
42 "runtime"
43 "sort"
44 "strconv"
45 "strings"
46 "sync"
47 "time"
48
49 "github.com/dustin/go-humanize"
50 jsoniter "github.com/json-iterator/go"
51 "github.com/minio/sha256-simd"
52 log "github.com/sirupsen/logrus"
53
54 "github.com/minio/minio-go/v7"
55 "github.com/minio/minio-go/v7/pkg/credentials"
56 "github.com/minio/minio-go/v7/pkg/encrypt"
57 "github.com/minio/minio-go/v7/pkg/notification"
58 "github.com/minio/minio-go/v7/pkg/tags"
59)
60
61const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
62const (
63 letterIdxBits = 6 // 6 bits to represent a letter index
64 letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
65 letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
66)
67
68const (
69 serverEndpoint = "SERVER_ENDPOINT"
70 accessKey = "ACCESS_KEY"
71 secretKey = "SECRET_KEY"
72 enableHTTPS = "ENABLE_HTTPS"
73 enableKMS = "ENABLE_KMS"
74)
75
76type mintJSONFormatter struct{}
77
78func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
79 data := make(log.Fields, len(entry.Data))
80 for k, v := range entry.Data {
81 switch v := v.(type) {
82 case error:
83 // Otherwise errors are ignored by `encoding/json`
84 // https://github.com/sirupsen/logrus/issues/137
85 data[k] = v.Error()
86 default:
87 data[k] = v
88 }
89 }
90 json := jsoniter.ConfigCompatibleWithStandardLibrary
91 serialized, err := json.Marshal(data)
92 if err != nil {
93 return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
94 }
95 return append(serialized, '\n'), nil
96}
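// Setup sketch (editorial note): the mint harness is expected to install
// this formatter on the global logrus logger before the tests run, e.g.:
//
//	log.SetFormatter(&mintJSONFormatter{})
//	log.SetOutput(os.Stdout)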
97
98var readFull = func(r io.Reader, buf []byte) (n int, err error) {
99 // ReadFull reads exactly len(buf) bytes from r into buf.
100 // It returns the number of bytes copied and an error if
101 // fewer bytes were read. The error is EOF only if no bytes
102 // were read. If an EOF happens after reading some but not
103 // all the bytes, ReadFull returns ErrUnexpectedEOF.
104 // On return, n == len(buf) if and only if err == nil.
105 // If r returns an error having read at least len(buf) bytes,
106 // the error is dropped.
107 for n < len(buf) && err == nil {
108 var nn int
109 nn, err = r.Read(buf[n:])
110 // Some spurious io.Readers return
111 // io.ErrUnexpectedEOF when nn == 0;
112 // this behavior is undocumented, so
113 // we deliberately avoid the stock
114 // io.ReadFull implementation, which
115 // would force custom error handling
116 // at call sites. Instead, this copy
117 // of io.ReadFull is modified to treat
118 // io.ErrUnexpectedEOF with nn == 0
119 // as what it really means: io.EOF.
120 if err == io.ErrUnexpectedEOF && nn == 0 {
121 err = io.EOF
122 }
123 n += nn
124 }
125 if n >= len(buf) {
126 err = nil
127 } else if n > 0 && err == io.EOF {
128 err = io.ErrUnexpectedEOF
129 }
130 return
131}
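// Behavioral note (editorial addition): unlike io.ReadFull, this helper maps
// a spurious (0, io.ErrUnexpectedEOF) result from the underlying reader to
// io.EOF. A tiny demonstration with a hypothetical misbehaving reader:
//
//	type spurious struct{}
//
//	func (spurious) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
//
//	buf := make([]byte, 4)
//	_, err := readFull(spurious{}, buf)     // err == io.EOF
//	_, err2 := io.ReadFull(spurious{}, buf) // err2 == io.ErrUnexpectedEOF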
132
133func cleanEmptyEntries(fields log.Fields) log.Fields {
134 cleanFields := log.Fields{}
135 for k, v := range fields {
136 if v != "" {
137 cleanFields[k] = v
138 }
139 }
140 return cleanFields
141}
142
143// log successful test runs
144func successLogger(testName, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
145 // calculate the test case duration
146 duration := time.Since(startTime)
147 // log with the fields as per mint
148 fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
149 return log.WithFields(cleanEmptyEntries(fields))
150}
151
152// As a few of the features are not available in Gateway(s) currently, check if the err value is NotImplemented,
153// and log it as NA in that case and continue execution. Otherwise log it as a failure and return.
154func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
155 // If the server returns NotImplemented, we assume it is gateway mode, log it as info, and move on to the next tests.
156 // ComposeObject is a special case: it is implemented on the client side and adds specific error details like `Error in upload-part-copy` in
157 // addition to the NotImplemented error returned from the server.
158 if isErrNotImplemented(err) {
159 ignoredLog(testName, function, args, startTime, message).Info()
160 } else if isRunOnFail() {
161 failureLog(testName, function, args, startTime, alert, message, err).Error()
162 } else {
163 failureLog(testName, function, args, startTime, alert, message, err).Fatal()
164 }
165}
166
167// log failed test runs
168func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry {
169 // calculate the test case duration
170 duration := time.Since(startTime)
171 var fields log.Fields
172 // log with the fields as per mint
173 if err != nil {
174 fields = log.Fields{
175 "name": "minio-go: " + testName, "function": function, "args": args,
176 "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err,
177 }
178 } else {
179 fields = log.Fields{
180 "name": "minio-go: " + testName, "function": function, "args": args,
181 "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message,
182 }
183 }
184 return log.WithFields(cleanEmptyEntries(fields))
185}
186
187// log not applicable test runs
188func ignoredLog(testName, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
189 // calculate the test case duration
190 duration := time.Since(startTime)
191 // log with the fields as per mint
192 fields := log.Fields{
193 "name": "minio-go: " + testName, "function": function, "args": args,
194 "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented",
195 }
196 return log.WithFields(cleanEmptyEntries(fields))
197}
198
199// Delete objects in the given bucket, recursively
200func cleanupBucket(bucketName string, c *minio.Client) error {
201 // Create a done channel to control the 'ListObjects' goroutine.
202 doneCh := make(chan struct{})
203 // Exit cleanly upon return.
204 defer close(doneCh)
205 // Iterate over all objects in the bucket via ListObjects and delete them
206 for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) {
207 if objCh.Err != nil {
208 return objCh.Err
209 }
210 if objCh.Key != "" {
211 err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{})
212 if err != nil {
213 return err
214 }
215 }
216 }
217 for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) {
218 if objPartInfo.Err != nil {
219 return objPartInfo.Err
220 }
221 if objPartInfo.Key != "" {
222 err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key)
223 if err != nil {
224 return err
225 }
226 }
227 }
228 // objects are already deleted, remove the bucket now
229 err := c.RemoveBucket(context.Background(), bucketName)
230 if err != nil {
231 return err
232 }
233 return err
234}
235
236func cleanupVersionedBucket(bucketName string, c *minio.Client) error {
237 doneCh := make(chan struct{})
238 defer close(doneCh)
239 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
240 if obj.Err != nil {
241 return obj.Err
242 }
243 if obj.Key != "" {
244 err := c.RemoveObject(context.Background(), bucketName, obj.Key,
245 minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true})
246 if err != nil {
247 return err
248 }
249 }
250 }
251 for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) {
252 if objPartInfo.Err != nil {
253 return objPartInfo.Err
254 }
255 if objPartInfo.Key != "" {
256 err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key)
257 if err != nil {
258 return err
259 }
260 }
261 }
262 // objects are already deleted, remove the bucket now
263 err := c.RemoveBucket(context.Background(), bucketName)
264 if err != nil {
265 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
266 log.Println("found", obj.Key, obj.VersionID)
267 }
268 return err
269 }
270 return err
271}
272
273func isErrNotImplemented(err error) bool {
274 return minio.ToErrorResponse(err).Code == "NotImplemented"
275}
276
277func isRunOnFail() bool {
278 return os.Getenv("RUN_ON_FAIL") == "1"
279}
280
281func init() {
282 // If server endpoint is not set, all tests default to
283 // using https://play.min.io
284 if os.Getenv(serverEndpoint) == "" {
285 os.Setenv(serverEndpoint, "play.min.io")
286 os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
287 os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
288 os.Setenv(enableHTTPS, "1")
289 }
290}
291
292var mintDataDir = os.Getenv("MINT_DATA_DIR")
293
294func getMintDataDirFilePath(filename string) (fp string) {
295 if mintDataDir == "" {
296 return
297 }
298 return filepath.Join(mintDataDir, filename)
299}
300
301func newRandomReader(seed, size int64) io.Reader {
302 return io.LimitReader(rand.New(rand.NewSource(seed)), size)
303}
304
305func mustCrcReader(r io.Reader) uint32 {
306 crc := crc32.NewIEEE()
307 _, err := io.Copy(crc, r)
308 if err != nil {
309 panic(err)
310 }
311 return crc.Sum32()
312}
313
314func crcMatches(r io.Reader, want uint32) error {
315 crc := crc32.NewIEEE()
316 _, err := io.Copy(crc, r)
317 if err != nil {
318 panic(err)
319 }
320 got := crc.Sum32()
321 if got != want {
322 return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
323 }
324 return nil
325}
326
327func crcMatchesName(r io.Reader, name string) error {
328 want := dataFileCRC32[name]
329 crc := crc32.NewIEEE()
330 _, err := io.Copy(crc, r)
331 if err != nil {
332 panic(err)
333 }
334 got := crc.Sum32()
335 if got != want {
336 return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
337 }
338 return nil
339}
340
341// read data from a file if it exists, or otherwise generate a reader of random data of the requested size
342func getDataReader(fileName string) io.ReadCloser {
343 if mintDataDir == "" {
344 size := int64(dataFileMap[fileName])
345 if _, ok := dataFileCRC32[fileName]; !ok {
346 dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
347 }
348 return io.NopCloser(newRandomReader(size, size))
349 }
350 reader, _ := os.Open(getMintDataDirFilePath(fileName))
351 if _, ok := dataFileCRC32[fileName]; !ok {
352 dataFileCRC32[fileName] = mustCrcReader(reader)
353 reader.Close()
354 reader, _ = os.Open(getMintDataDirFilePath(fileName))
355 }
356 return reader
357}
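// Usage sketch (editorial note): with MINT_DATA_DIR unset, the reader is a
// deterministic pseudo-random stream seeded by the requested size, and the
// first call registers its CRC in dataFileCRC32 so later downloads can be
// verified:
//
//	r := getDataReader("datafile-10-kB") // registers the CRC on first use
//	defer r.Close()
//	// ... upload r, then verify the downloaded copy with:
//	// crcMatchesName(downloaded, "datafile-10-kB")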
358
359// randString generates random names and prepends them with a known prefix.
360func randString(n int, src rand.Source, prefix string) string {
361 b := make([]byte, n)
362 // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
363 for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
364 if remain == 0 {
365 cache, remain = src.Int63(), letterIdxMax
366 }
367 if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
368 b[i] = letterBytes[idx]
369 i--
370 }
371 cache >>= letterIdxBits
372 remain--
373 }
374 return prefix + string(b[0:30-len(prefix)])
375}
376
377var dataFileMap = map[string]int{
378 "datafile-0-b": 0,
379 "datafile-1-b": 1,
380 "datafile-1-kB": 1 * humanize.KiByte,
381 "datafile-10-kB": 10 * humanize.KiByte,
382 "datafile-33-kB": 33 * humanize.KiByte,
383 "datafile-100-kB": 100 * humanize.KiByte,
384 "datafile-1.03-MB": 1056 * humanize.KiByte,
385 "datafile-1-MB": 1 * humanize.MiByte,
386 "datafile-5-MB": 5 * humanize.MiByte,
387 "datafile-6-MB": 6 * humanize.MiByte,
388 "datafile-11-MB": 11 * humanize.MiByte,
389 "datafile-65-MB": 65 * humanize.MiByte,
390 "datafile-129-MB": 129 * humanize.MiByte,
391}
392
393var dataFileCRC32 = map[string]uint32{}
394
395func isFullMode() bool {
396 return os.Getenv("MINT_MODE") == "full"
397}
398
399func getFuncName() string {
400 return getFuncNameLoc(2)
401}
402
403func getFuncNameLoc(caller int) string {
404 pc, _, _, _ := runtime.Caller(caller)
405 return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
406}
407
408// Tests bucket re-create errors.
409func testMakeBucketError() {
410 region := "eu-central-1"
411
412 // initialize logging params
413 startTime := time.Now()
414 testName := getFuncName()
415 function := "MakeBucket(bucketName, region)"
417 args := map[string]interface{}{
418 "bucketName": "",
419 "region": region,
420 }
421
422 // Seed random based on current time.
423 rand.Seed(time.Now().Unix())
424
425 // Instantiate new minio client object.
426 c, err := minio.New(os.Getenv(serverEndpoint),
427 &minio.Options{
428 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
429 Secure: mustParseBool(os.Getenv(enableHTTPS)),
430 })
431 if err != nil {
432 logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
433 return
434 }
435
436 // Enable tracing, write to stderr.
437 // c.TraceOn(os.Stderr)
438
439 // Set user agent.
440 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
441
442 // Generate a new random bucket name.
443 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
444 args["bucketName"] = bucketName
445
446 // Make a new bucket in 'eu-central-1'.
447 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
448 logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
449 return
450 }
451 defer cleanupBucket(bucketName, c)
452
453 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
454 logError(testName, function, args, startTime, "", "MakeBucket unexpectedly succeeded for an existing bucket", err)
455 return
456 }
457 // Verify valid error response from server.
458 if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
459 minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
460 logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
461 return
462 }
463
464 successLogger(testName, function, args, startTime).Info()
465}
466
467func testMetadataSizeLimit() {
468 startTime := time.Now()
469 testName := getFuncName()
470 function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
471 args := map[string]interface{}{
472 "bucketName": "",
473 "objectName": "",
474 "opts.UserMetadata": "",
475 }
476 rand.Seed(startTime.Unix())
477
478 // Instantiate new minio client object.
479 c, err := minio.New(os.Getenv(serverEndpoint),
480 &minio.Options{
481 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
482 Secure: mustParseBool(os.Getenv(enableHTTPS)),
483 })
484 if err != nil {
485 logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
486 return
487 }
488 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
489
490 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
491 args["bucketName"] = bucketName
492
493 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
494 args["objectName"] = objectName
495
496 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
497 if err != nil {
498 logError(testName, function, args, startTime, "", "Make bucket failed", err)
499 return
500 }
501
502 defer cleanupBucket(bucketName, c)
503
504 const HeaderSizeLimit = 8 * 1024
505 const UserMetadataLimit = 2 * 1024
506
507 // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
508 metadata := make(map[string]string)
509 metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
510 args["metadata"] = fmt.Sprint(metadata)
511
512 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
513 if err == nil {
514 logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
515 return
516 }
517
518 // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
519 metadata = make(map[string]string)
520 metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
521 args["metadata"] = fmt.Sprint(metadata)
522 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
523 if err == nil {
524 logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
525 return
526 }
527
528 successLogger(testName, function, args, startTime).Info()
529}
530
531// Tests various bucket supported formats.
532func testMakeBucketRegions() {
533 region := "eu-central-1"
534 // initialize logging params
535 startTime := time.Now()
536 testName := getFuncName()
537 function := "MakeBucket(bucketName, region)"
539 args := map[string]interface{}{
540 "bucketName": "",
541 "region": region,
542 }
543
544 // Seed random based on current time.
545 rand.Seed(time.Now().Unix())
546
547 // Instantiate new minio client object.
548 c, err := minio.New(os.Getenv(serverEndpoint),
549 &minio.Options{
550 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
551 Secure: mustParseBool(os.Getenv(enableHTTPS)),
552 })
553 if err != nil {
554 logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
555 return
556 }
557
558 // Enable tracing, write to stderr.
559 // c.TraceOn(os.Stderr)
560
561 // Set user agent.
562 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
563
564 // Generate a new random bucket name.
565 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
566 args["bucketName"] = bucketName
567
568 // Make a new bucket in 'eu-central-1'.
569 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
570 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
571 return
572 }
573
574 // Delete all objects and buckets
575 if err = cleanupBucket(bucketName, c); err != nil {
576 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
577 return
578 }
579
580 // Make a new bucket with '.' in its name, in 'us-west-2'. This
581 // request is internally routed using path-style addressing instead of
582 // virtual-host style.
583 region = "us-west-2"
584 args["region"] = region
585 if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil {
586 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
587 return
588 }
589
590 // Delete all objects and buckets
591 if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
592 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
593 return
594 }
595 successLogger(testName, function, args, startTime).Info()
596}
597
598// Test PutObject using large data to trigger the multipart ReadAt path
599func testPutObjectReadAt() {
600 // initialize logging params
601 startTime := time.Now()
602 testName := getFuncName()
603 function := "PutObject(bucketName, objectName, reader, opts)"
604 args := map[string]interface{}{
605 "bucketName": "",
606 "objectName": "",
607 "opts": "objectContentType",
608 }
609
610 // Seed random based on current time.
611 rand.Seed(time.Now().Unix())
612
613 // Instantiate new minio client object.
614 c, err := minio.New(os.Getenv(serverEndpoint),
615 &minio.Options{
616 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
617 Secure: mustParseBool(os.Getenv(enableHTTPS)),
618 })
619 if err != nil {
620 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
621 return
622 }
623
624 // Enable tracing, write to stderr.
625 // c.TraceOn(os.Stderr)
626
627 // Set user agent.
628 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
629
630 // Generate a new random bucket name.
631 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
632 args["bucketName"] = bucketName
633
634 // Make a new bucket.
635 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
636 if err != nil {
637 logError(testName, function, args, startTime, "", "Make bucket failed", err)
638 return
639 }
640
641 defer cleanupBucket(bucketName, c)
642
643 bufSize := dataFileMap["datafile-129-MB"]
644 reader := getDataReader("datafile-129-MB")
645 defer reader.Close()
646
647 // Save the data
648 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
649 args["objectName"] = objectName
650
651 // Object content type
652 objectContentType := "binary/octet-stream"
653 args["objectContentType"] = objectContentType
654
655 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
656 if err != nil {
657 logError(testName, function, args, startTime, "", "PutObject failed", err)
658 return
659 }
660
661 // Read the data back
662 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
663 if err != nil {
664 logError(testName, function, args, startTime, "", "Get Object failed", err)
665 return
666 }
667
668 st, err := r.Stat()
669 if err != nil {
670 logError(testName, function, args, startTime, "", "Stat Object failed", err)
671 return
672 }
673 if st.Size != int64(bufSize) {
674 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
675 return
676 }
677 if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" {
678 logError(testName, function, args, startTime, "", "Content types don't match", err)
679 return
680 }
681 if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
682 logError(testName, function, args, startTime, "", "data CRC check failed", err)
683 return
684 }
685 if err := r.Close(); err != nil {
686 logError(testName, function, args, startTime, "", "Object Close failed", err)
687 return
688 }
689 if err := r.Close(); err == nil {
690 logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
691 return
692 }
693
694 successLogger(testName, function, args, startTime).Info()
695}
696
697func testListObjectVersions() {
698 // initialize logging params
699 startTime := time.Now()
700 testName := getFuncName()
701 function := "ListObjectVersions(bucketName, prefix, recursive)"
702 args := map[string]interface{}{
703 "bucketName": "",
704 "prefix": "",
705 "recursive": "",
706 }
707
708 // Seed random based on current time.
709 rand.Seed(time.Now().Unix())
710
711 // Instantiate new minio client object.
712 c, err := minio.New(os.Getenv(serverEndpoint),
713 &minio.Options{
714 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
715 Secure: mustParseBool(os.Getenv(enableHTTPS)),
716 })
717 if err != nil {
718 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
719 return
720 }
721
722 // Enable tracing, write to stderr.
723 // c.TraceOn(os.Stderr)
724
725 // Set user agent.
726 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
727
728 // Generate a new random bucket name.
729 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
730 args["bucketName"] = bucketName
731
732 // Make a new bucket.
733 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
734 if err != nil {
735 logError(testName, function, args, startTime, "", "Make bucket failed", err)
736 return
737 }
738
739 err = c.EnableVersioning(context.Background(), bucketName)
740 if err != nil {
741 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
742 return
743 }
744
745 // Save the data
746 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
747 args["objectName"] = objectName
748
749 bufSize := dataFileMap["datafile-10-kB"]
750 reader := getDataReader("datafile-10-kB")
751
752 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
753 if err != nil {
754 logError(testName, function, args, startTime, "", "PutObject failed", err)
755 return
756 }
757 reader.Close()
758
759 bufSize = dataFileMap["datafile-1-b"]
760 reader = getDataReader("datafile-1-b")
761 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
762 if err != nil {
763 logError(testName, function, args, startTime, "", "PutObject failed", err)
764 return
765 }
766 reader.Close()
767
768 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
769 if err != nil {
770 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
771 return
772 }
773
774 var deleteMarkers, versions int
775
776 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
777 for info := range objectsInfo {
778 if info.Err != nil {
779 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
780 return
781 }
782 if info.Key != objectName {
783 logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil)
784 return
785 }
786 if info.VersionID == "" {
787 logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil)
788 return
789 }
790 if info.IsDeleteMarker {
791 deleteMarkers++
792 if !info.IsLatest {
793 logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil)
794 return
795 }
796 } else {
797 versions++
798 }
799 }
800
801 if deleteMarkers != 1 {
802 logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil)
803 return
804 }
805
806 if versions != 2 {
807 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
808 return
809 }
810
811 // Delete all objects and their versions as well as the bucket itself
812 if err = cleanupVersionedBucket(bucketName, c); err != nil {
813 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
814 return
815 }
816
817 successLogger(testName, function, args, startTime).Info()
818}
819
820func testStatObjectWithVersioning() {
821 // initialize logging params
822 startTime := time.Now()
823 testName := getFuncName()
824 function := "StatObject"
825 args := map[string]interface{}{}
826
827 // Seed random based on current time.
828 rand.Seed(time.Now().Unix())
829
830 // Instantiate new minio client object.
831 c, err := minio.New(os.Getenv(serverEndpoint),
832 &minio.Options{
833 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
834 Secure: mustParseBool(os.Getenv(enableHTTPS)),
835 })
836 if err != nil {
837 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
838 return
839 }
840
841 // Enable tracing, write to stderr.
842 // c.TraceOn(os.Stderr)
843
844 // Set user agent.
845 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
846
847 // Generate a new random bucket name.
848 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
849 args["bucketName"] = bucketName
850
851 // Make a new bucket.
852 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
853 if err != nil {
854 logError(testName, function, args, startTime, "", "Make bucket failed", err)
855 return
856 }
857
858 err = c.EnableVersioning(context.Background(), bucketName)
859 if err != nil {
860 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
861 return
862 }
863
864 // Save the data
865 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
866 args["objectName"] = objectName
867
868 bufSize := dataFileMap["datafile-10-kB"]
869 reader := getDataReader("datafile-10-kB")
870
871 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
872 if err != nil {
873 logError(testName, function, args, startTime, "", "PutObject failed", err)
874 return
875 }
876 reader.Close()
877
878 bufSize = dataFileMap["datafile-1-b"]
879 reader = getDataReader("datafile-1-b")
880 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
881 if err != nil {
882 logError(testName, function, args, startTime, "", "PutObject failed", err)
883 return
884 }
885 reader.Close()
886
887 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
888
889 var results []minio.ObjectInfo
890 for info := range objectsInfo {
891 if info.Err != nil {
892 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
893 return
894 }
895 results = append(results, info)
896 }
897
898 if len(results) != 2 {
899 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
900 return
901 }
902
903 for i := 0; i < len(results); i++ {
904 opts := minio.StatObjectOptions{VersionID: results[i].VersionID}
905 statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts)
906 if err != nil {
907 logError(testName, function, args, startTime, "", "error during HEAD object", err)
908 return
909 }
910 if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID {
911 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err)
912 return
913 }
914 if statInfo.ETag != results[i].ETag {
915 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
916 return
917 }
918 if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
919 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
920 return
921 }
922 if statInfo.Size != results[i].Size {
923 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
924 return
925 }
926 }
927
928 // Delete all objects and their versions as well as the bucket itself
929 if err = cleanupVersionedBucket(bucketName, c); err != nil {
930 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
931 return
932 }
933
934 successLogger(testName, function, args, startTime).Info()
935}
936
937func testGetObjectWithVersioning() {
938 // initialize logging params
939 startTime := time.Now()
940 testName := getFuncName()
941 function := "GetObject()"
942 args := map[string]interface{}{}
943
944 // Seed random based on current time.
945 rand.Seed(time.Now().Unix())
946
947 // Instantiate new minio client object.
948 c, err := minio.New(os.Getenv(serverEndpoint),
949 &minio.Options{
950 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
951 Secure: mustParseBool(os.Getenv(enableHTTPS)),
952 })
953 if err != nil {
954 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
955 return
956 }
957
958 // Enable tracing, write to stderr.
959 // c.TraceOn(os.Stderr)
960
961 // Set user agent.
962 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
963
964 // Generate a new random bucket name.
965 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
966 args["bucketName"] = bucketName
967
968 // Make a new bucket.
969 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
970 if err != nil {
971 logError(testName, function, args, startTime, "", "Make bucket failed", err)
972 return
973 }
974
975 err = c.EnableVersioning(context.Background(), bucketName)
976 if err != nil {
977 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
978 return
979 }
980
981 // Save the data
982 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
983 args["objectName"] = objectName
984
985 // Save the contents of datafiles to check with GetObject() reader output later
986 var buffers [][]byte
987 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
988
989 for _, testFile := range testFiles {
990 r := getDataReader(testFile)
991 buf, err := io.ReadAll(r)
992 if err != nil {
993 logError(testName, function, args, startTime, "", "unexpected failure", err)
994 return
995 }
996 r.Close()
997 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
998 if err != nil {
999 logError(testName, function, args, startTime, "", "PutObject failed", err)
1000 return
1001 }
1002 buffers = append(buffers, buf)
1003 }
1004
1005 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1006
1007 var results []minio.ObjectInfo
1008 for info := range objectsInfo {
1009 if info.Err != nil {
1010 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1011 return
1012 }
1013 results = append(results, info)
1014 }
1015
1016 if len(results) != 2 {
1017 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1018 return
1019 }
1020
1021 sort.SliceStable(results, func(i, j int) bool {
1022 return results[i].Size < results[j].Size
1023 })
1024
1025 sort.SliceStable(buffers, func(i, j int) bool {
1026 return len(buffers[i]) < len(buffers[j])
1027 })
1028
1029 for i := 0; i < len(results); i++ {
1030 opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
1031 reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
1032 if err != nil {
1033 logError(testName, function, args, startTime, "", "error during GET object", err)
1034 return
1035 }
1036 statInfo, err := reader.Stat()
1037 if err != nil {
1038 logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
1039 return
1040 }
1041 if statInfo.ETag != results[i].ETag {
1042 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
1043 return
1044 }
1045 if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
1046 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
1047 return
1048 }
1049 if statInfo.Size != results[i].Size {
1050 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
1051 return
1052 }
1053
1054 tmpBuffer := bytes.NewBuffer([]byte{})
1055 _, err = io.Copy(tmpBuffer, reader)
1056 if err != nil {
1057 logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
1058 return
1059 }
1060
1061 if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
1062 logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
1063 return
1064 }
1065 }
1066
1067 // Delete all objects and their versions as well as the bucket itself
1068 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1069 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1070 return
1071 }
1072
1073 successLogger(testName, function, args, startTime).Info()
1074}
1075
1076func testPutObjectWithVersioning() {
1077 // initialize logging params
1078 startTime := time.Now()
1079 testName := getFuncName()
1080 function := "GetObject()"
1081 args := map[string]interface{}{}
1082
1083 // Seed random based on current time.
1084 rand.Seed(time.Now().Unix())
1085
1086 // Instantiate new minio client object.
1087 c, err := minio.New(os.Getenv(serverEndpoint),
1088 &minio.Options{
1089 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1090 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1091 })
1092 if err != nil {
1093 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1094 return
1095 }
1096
1097 // Enable tracing, write to stderr.
1098 // c.TraceOn(os.Stderr)
1099
1100 // Set user agent.
1101 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1102
1103 // Generate a new random bucket name.
1104 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1105 args["bucketName"] = bucketName
1106
1107 // Make a new bucket.
1108 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1109 if err != nil {
1110 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1111 return
1112 }
1113
1114 err = c.EnableVersioning(context.Background(), bucketName)
1115 if err != nil {
1116 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1117 return
1118 }
1119
1120 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1121 args["objectName"] = objectName
1122
1123 const n = 10
1124 // Read input...
1125
1126 // Save the data concurrently.
1127 var wg sync.WaitGroup
1128 wg.Add(n)
1129 buffers := make([][]byte, n)
1130 var errs [n]error
1131 for i := 0; i < n; i++ {
1132 r := newRandomReader(int64((1<<20)*i+i), int64(i))
1133 buf, err := io.ReadAll(r)
1134 if err != nil {
1135 logError(testName, function, args, startTime, "", "unexpected failure", err)
1136 return
1137 }
1138 buffers[i] = buf
1139
1140 go func(i int) {
1141 defer wg.Done()
1142 _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20})
1143 }(i)
1144 }
1145 wg.Wait()
1146 for _, err := range errs {
1147 if err != nil {
1148 logError(testName, function, args, startTime, "", "PutObject failed", err)
1149 return
1150 }
1151 }
1152
1153 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1154 var results []minio.ObjectInfo
1155 for info := range objectsInfo {
1156 if info.Err != nil {
1157 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1158 return
1159 }
1160 results = append(results, info)
1161 }
1162
1163 if len(results) != n {
1164 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1165 return
1166 }
1167
1168 sort.Slice(results, func(i, j int) bool {
1169 return results[i].Size < results[j].Size
1170 })
1171
1172 sort.Slice(buffers, func(i, j int) bool {
1173 return len(buffers[i]) < len(buffers[j])
1174 })
1175
1176 for i := 0; i < len(results); i++ {
1177 opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
1178 reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
1179 if err != nil {
1180 logError(testName, function, args, startTime, "", "error during GET object", err)
1181 return
1182 }
1183 statInfo, err := reader.Stat()
1184 if err != nil {
1185 logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
1186 return
1187 }
1188 if statInfo.ETag != results[i].ETag {
1189 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
1190 return
1191 }
1192 if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
1193 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
1194 return
1195 }
1196 if statInfo.Size != results[i].Size {
1197 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
1198 return
1199 }
1200
1201 tmpBuffer := bytes.NewBuffer([]byte{})
1202 _, err = io.Copy(tmpBuffer, reader)
1203 if err != nil {
1204 logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
1205 return
1206 }
1207
1208 if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
1209 logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
1210 return
1211 }
1212 }
1213
1214 // Delete all objects and their versions as well as the bucket itself
1215 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1216 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1217 return
1218 }
1219
1220 successLogger(testName, function, args, startTime).Info()
1221}
1222
1223func testCopyObjectWithVersioning() {
1224 // initialize logging params
1225 startTime := time.Now()
1226 testName := getFuncName()
1227 function := "CopyObject()"
1228 args := map[string]interface{}{}
1229
1230 // Seed random based on current time.
1231 rand.Seed(time.Now().Unix())
1232
1233 // Instantiate new minio client object.
1234 c, err := minio.New(os.Getenv(serverEndpoint),
1235 &minio.Options{
1236 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1237 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1238 })
1239 if err != nil {
1240 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1241 return
1242 }
1243
1244 // Enable tracing, write to stderr.
1245 // c.TraceOn(os.Stderr)
1246
1247 // Set user agent.
1248 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1249
1250 // Generate a new random bucket name.
1251 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1252 args["bucketName"] = bucketName
1253
1254 // Make a new bucket.
1255 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1256 if err != nil {
1257 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1258 return
1259 }
1260
1261 err = c.EnableVersioning(context.Background(), bucketName)
1262 if err != nil {
1263 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1264 return
1265 }
1266
1267 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1268 args["objectName"] = objectName
1269
1270 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
1271 for _, testFile := range testFiles {
1272 r := getDataReader(testFile)
1273 buf, err := io.ReadAll(r)
1274 if err != nil {
1275 logError(testName, function, args, startTime, "", "unexpected failure", err)
1276 return
1277 }
1278 r.Close()
1279 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1280 if err != nil {
1281 logError(testName, function, args, startTime, "", "PutObject failed", err)
1282 return
1283 }
1284 }
1285
1286 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1287 var infos []minio.ObjectInfo
1288 for info := range objectsInfo {
1289 if info.Err != nil {
1290 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1291 return
1292 }
1293 infos = append(infos, info)
1294 }
1295
1296 sort.Slice(infos, func(i, j int) bool {
1297 return infos[i].Size < infos[j].Size
1298 })
1299
1300 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
1301 if err != nil {
1302 logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
1303 return
1304 }
1305
1306 oldestContent, err := io.ReadAll(reader)
1307 if err != nil {
1308 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
1309 return
1310 }
1311
1312 // Copy Source
1313 srcOpts := minio.CopySrcOptions{
1314 Bucket: bucketName,
1315 Object: objectName,
1316 VersionID: infos[0].VersionID,
1317 }
1318 args["src"] = srcOpts
1319
1320 dstOpts := minio.CopyDestOptions{
1321 Bucket: bucketName,
1322 Object: objectName + "-copy",
1323 }
1324 args["dst"] = dstOpts
1325
1326 // Perform the Copy
1327 if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
1328 logError(testName, function, args, startTime, "", "CopyObject failed", err)
1329 return
1330 }
1331
1332 // Destination object
1333 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
1334 if err != nil {
1335 logError(testName, function, args, startTime, "", "GetObject failed", err)
1336 return
1337 }
1338 defer readerCopy.Close()
1339
1340 newestContent, err := io.ReadAll(readerCopy)
1341 if err != nil {
1342 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
1343 return
1344 }
1345
1346 if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
1347 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1348 return
1349 }
1350
1351 // Delete all objects and their versions as well as the bucket itself
1352 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1353 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1354 return
1355 }
1356
1357 successLogger(testName, function, args, startTime).Info()
1358}
1359
1360func testConcurrentCopyObjectWithVersioning() {
1361 // initialize logging params
1362 startTime := time.Now()
1363 testName := getFuncName()
1364 function := "CopyObject()"
1365 args := map[string]interface{}{}
1366
1367 // Seed random based on current time.
1368 rand.Seed(time.Now().Unix())
1369
1370 // Instantiate new minio client object.
1371 c, err := minio.New(os.Getenv(serverEndpoint),
1372 &minio.Options{
1373 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1374 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1375 })
1376 if err != nil {
1377 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1378 return
1379 }
1380
1381 // Enable tracing, write to stderr.
1382 // c.TraceOn(os.Stderr)
1383
1384 // Set user agent.
1385 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1386
1387 // Generate a new random bucket name.
1388 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1389 args["bucketName"] = bucketName
1390
1391 // Make a new bucket.
1392 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1393 if err != nil {
1394 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1395 return
1396 }
1397
1398 err = c.EnableVersioning(context.Background(), bucketName)
1399 if err != nil {
1400 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1401 return
1402 }
1403
1404 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1405 args["objectName"] = objectName
1406
1407 testFiles := []string{"datafile-10-kB"}
1408 for _, testFile := range testFiles {
1409 r := getDataReader(testFile)
1410 buf, err := io.ReadAll(r)
1411 if err != nil {
1412 logError(testName, function, args, startTime, "", "unexpected failure", err)
1413 return
1414 }
1415 r.Close()
1416 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1417 if err != nil {
1418 logError(testName, function, args, startTime, "", "PutObject failed", err)
1419 return
1420 }
1421 }
1422
1423 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1424 var infos []minio.ObjectInfo
1425 for info := range objectsInfo {
1426 if info.Err != nil {
1427 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1428 return
1429 }
1430 infos = append(infos, info)
1431 }
1432
1433 sort.Slice(infos, func(i, j int) bool {
1434 return infos[i].Size < infos[j].Size
1435 })
1436
1437 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
1438 if err != nil {
1439 logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
1440 return
1441 }
1442
1443 oldestContent, err := io.ReadAll(reader)
1444 if err != nil {
1445 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
1446 return
1447 }
1448
1449 // Copy Source
1450 srcOpts := minio.CopySrcOptions{
1451 Bucket: bucketName,
1452 Object: objectName,
1453 VersionID: infos[0].VersionID,
1454 }
1455 args["src"] = srcOpts
1456
1457 dstOpts := minio.CopyDestOptions{
1458 Bucket: bucketName,
1459 Object: objectName + "-copy",
1460 }
1461 args["dst"] = dstOpts
1462
1463 // Perform the Copy concurrently
1464 const n = 10
1465 var wg sync.WaitGroup
1466 wg.Add(n)
1467 var errs [n]error
1468 for i := 0; i < n; i++ {
1469 go func(i int) {
1470 defer wg.Done()
1471 _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts)
1472 }(i)
1473 }
1474 wg.Wait()
1475 for _, err := range errs {
1476 if err != nil {
1477 logError(testName, function, args, startTime, "", "CopyObject failed", err)
1478 return
1479 }
1480 }
1481
1482 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object})
1483 infos = []minio.ObjectInfo{}
1484 for info := range objectsInfo {
1485 // Destination object
1486 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID})
1487 if err != nil {
1488 logError(testName, function, args, startTime, "", "GetObject failed", err)
1489 return
1490 }
1491 defer readerCopy.Close()
1492
1493 newestContent, err := io.ReadAll(readerCopy)
1494 if err != nil {
1495 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
1496 return
1497 }
1498
1499 if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
1500 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1501 return
1502 }
1503 infos = append(infos, info)
1504 }
1505
1506 if len(infos) != n {
1507 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1508 return
1509 }
1510
1511 // Delete all objects and their versions as well as the bucket itself
1512 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1513 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1514 return
1515 }
1516
1517 successLogger(testName, function, args, startTime).Info()
1518}
1519
1520func testComposeObjectWithVersioning() {
1521 // initialize logging params
1522 startTime := time.Now()
1523 testName := getFuncName()
1524 function := "ComposeObject()"
1525 args := map[string]interface{}{}
1526
1527 // Seed random based on current time.
1528 rand.Seed(time.Now().Unix())
1529
1530 // Instantiate new minio client object.
1531 c, err := minio.New(os.Getenv(serverEndpoint),
1532 &minio.Options{
1533 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1534 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1535 })
1536 if err != nil {
1537 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1538 return
1539 }
1540
1541 // Enable tracing, write to stderr.
1542 // c.TraceOn(os.Stderr)
1543
1544 // Set user agent.
1545 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1546
1547 // Generate a new random bucket name.
1548 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1549 args["bucketName"] = bucketName
1550
1551 // Make a new bucket.
1552 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1553 if err != nil {
1554 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1555 return
1556 }
1557
1558 err = c.EnableVersioning(context.Background(), bucketName)
1559 if err != nil {
1560 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1561 return
1562 }
1563
1564 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1565 args["objectName"] = objectName
1566
1568 testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
1569 var testFilesBytes [][]byte
1570
1571 for _, testFile := range testFiles {
1572 r := getDataReader(testFile)
1573 buf, err := io.ReadAll(r)
1574 if err != nil {
1575 logError(testName, function, args, startTime, "", "unexpected failure", err)
1576 return
1577 }
1578 r.Close()
1579 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1580 if err != nil {
1581 logError(testName, function, args, startTime, "", "PutObject failed", err)
1582 return
1583 }
1584 testFilesBytes = append(testFilesBytes, buf)
1585 }
1586
1587 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1588
1589 var results []minio.ObjectInfo
1590 for info := range objectsInfo {
1591 if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1593 return
1594 }
1595 results = append(results, info)
1596 }
1597
1598 sort.SliceStable(results, func(i, j int) bool {
1599 return results[i].Size > results[j].Size
1600 })
1601
	// Source objects to concatenate: two specific versions of the
	// same object.
1604 src1 := minio.CopySrcOptions{
1605 Bucket: bucketName,
1606 Object: objectName,
1607 VersionID: results[0].VersionID,
1608 }
1609
1610 src2 := minio.CopySrcOptions{
1611 Bucket: bucketName,
1612 Object: objectName,
1613 VersionID: results[1].VersionID,
1614 }
1615
1616 dst := minio.CopyDestOptions{
1617 Bucket: bucketName,
1618 Object: objectName + "-copy",
1619 }
1620
1621 _, err = c.ComposeObject(context.Background(), dst, src1, src2)
1622 if err != nil {
1623 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
1624 return
1625 }
1626
1627 // Destination object
1628 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
1629 if err != nil {
1630 logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err)
1631 return
1632 }
1633 defer readerCopy.Close()
1634
1635 copyContentBytes, err := io.ReadAll(readerCopy)
1636 if err != nil {
1637 logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
1638 return
1639 }
1640
1641 var expectedContent []byte
1642 for _, fileBytes := range testFilesBytes {
1643 expectedContent = append(expectedContent, fileBytes...)
1644 }
1645
1646 if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) {
1647 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1648 return
1649 }
1650
	// Delete all objects and their versions, as well as the bucket itself
1652 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1653 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1654 return
1655 }
1656
1657 successLogger(testName, function, args, startTime).Info()
1658}
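
// A minimal sketch of the ComposeObject pattern exercised above: two
// version-pinned sources of the same object concatenated into a new
// destination. All names are placeholders.
func composeVersionsSketch(c *minio.Client, bucket, object, v1, v2 string) error {
	src1 := minio.CopySrcOptions{Bucket: bucket, Object: object, VersionID: v1}
	src2 := minio.CopySrcOptions{Bucket: bucket, Object: object, VersionID: v2}
	dst := minio.CopyDestOptions{Bucket: bucket, Object: object + "-concat"}
	_, err := c.ComposeObject(context.Background(), dst, src1, src2)
	return err
}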
1659
1660func testRemoveObjectWithVersioning() {
1661 // initialize logging params
1662 startTime := time.Now()
1663 testName := getFuncName()
1664 function := "DeleteObject()"
1665 args := map[string]interface{}{}
1666
1667 // Seed random based on current time.
1668 rand.Seed(time.Now().Unix())
1669
1670 // Instantiate new minio client object.
1671 c, err := minio.New(os.Getenv(serverEndpoint),
1672 &minio.Options{
1673 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1674 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1675 })
1676 if err != nil {
1677 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1678 return
1679 }
1680
1681 // Enable tracing, write to stderr.
1682 // c.TraceOn(os.Stderr)
1683
1684 // Set user agent.
1685 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1686
1687 // Generate a new random bucket name.
1688 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1689 args["bucketName"] = bucketName
1690
1691 // Make a new bucket.
1692 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1693 if err != nil {
1694 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1695 return
1696 }
1697
1698 err = c.EnableVersioning(context.Background(), bucketName)
1699 if err != nil {
1700 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1701 return
1702 }
1703
1704 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1705 args["objectName"] = objectName
1706
1707 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1708 if err != nil {
1709 logError(testName, function, args, startTime, "", "PutObject failed", err)
1710 return
1711 }
1712
1713 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1714 var version minio.ObjectInfo
1715 for info := range objectsInfo {
1716 if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1718 return
1719 }
1720 version = info
1721 break
1722 }
1723
1724 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID})
1725 if err != nil {
1726 logError(testName, function, args, startTime, "", "DeleteObject failed", err)
1727 return
1728 }
1729
1730 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1731 for range objectsInfo {
		logError(testName, function, args, startTime, "", "Unexpected version listed, the object should have no versions left", nil)
1733 return
1734 }
	// Test that the delete marker's version ID is non-empty
1736 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1737 if err != nil {
1738 logError(testName, function, args, startTime, "", "PutObject failed", err)
1739 return
1740 }
1741 // create delete marker
1742 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
1743 if err != nil {
1744 logError(testName, function, args, startTime, "", "DeleteObject failed", err)
1745 return
1746 }
1747 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1748 idx := 0
1749 for info := range objectsInfo {
1750 if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1752 return
1753 }
1754 if idx == 0 {
1755 if !info.IsDeleteMarker {
1756 logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
1757 return
1758 }
1759 if info.VersionID == "" {
1760 logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
1761 return
1762 }
1763 }
1764 idx++
1765 }
1766
	defer cleanupVersionedBucket(bucketName, c)
1768
1769 successLogger(testName, function, args, startTime).Info()
1770}
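
// A minimal sketch (hypothetical helper) of the two delete modes exercised
// above on a versioned bucket: passing a VersionID removes that version
// permanently, while omitting it only stacks a delete marker on top.
func removeVersionSketch(c *minio.Client, bucket, object, versionID string) error {
	// Permanently delete one specific version.
	if err := c.RemoveObject(context.Background(), bucket, object,
		minio.RemoveObjectOptions{VersionID: versionID}); err != nil {
		return err
	}
	// No VersionID: only a delete marker is created, older versions remain.
	return c.RemoveObject(context.Background(), bucket, object, minio.RemoveObjectOptions{})
}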
1771
1772func testRemoveObjectsWithVersioning() {
1773 // initialize logging params
1774 startTime := time.Now()
1775 testName := getFuncName()
1776 function := "DeleteObjects()"
1777 args := map[string]interface{}{}
1778
1779 // Seed random based on current time.
1780 rand.Seed(time.Now().Unix())
1781
1782 // Instantiate new minio client object.
1783 c, err := minio.New(os.Getenv(serverEndpoint),
1784 &minio.Options{
1785 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1786 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1787 })
1788 if err != nil {
1789 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1790 return
1791 }
1792
1793 // Enable tracing, write to stderr.
1794 // c.TraceOn(os.Stderr)
1795
1796 // Set user agent.
1797 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1798
1799 // Generate a new random bucket name.
1800 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1801 args["bucketName"] = bucketName
1802
1803 // Make a new bucket.
1804 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1805 if err != nil {
1806 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1807 return
1808 }
1809
1810 err = c.EnableVersioning(context.Background(), bucketName)
1811 if err != nil {
1812 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1813 return
1814 }
1815
1816 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1817 args["objectName"] = objectName
1818
1819 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1820 if err != nil {
1821 logError(testName, function, args, startTime, "", "PutObject failed", err)
1822 return
1823 }
1824
1825 objectsVersions := make(chan minio.ObjectInfo)
1826 go func() {
1827 objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
1828 minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1829 for info := range objectsVersionsInfo {
1830 if info.Err != nil {
				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1832 return
1833 }
1834 objectsVersions <- info
1835 }
1836 close(objectsVersions)
1837 }()
1838
	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
1844
1845 for e := range removeErrors {
1846 if e.Err != nil {
			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
1848 return
1849 }
1850 }
1851
1852 objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1853 for range objectsVersionsInfo {
		logError(testName, function, args, startTime, "", "Unexpected version listed, the bucket should be empty", nil)
1855 return
1856 }
1857
1858 err = c.RemoveBucket(context.Background(), bucketName)
1859 if err != nil {
1860 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1861 return
1862 }
1863
1864 successLogger(testName, function, args, startTime).Info()
1865}
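
// A minimal sketch (hypothetical helper) of the producer/consumer pattern
// above: stream all versions into RemoveObjects through a channel and drain
// the error channel. Unlike the test goroutine, the channel is closed on
// the error path as well, so RemoveObjects cannot block forever.
func drainVersionsSketch(c *minio.Client, bucket string) error {
	versions := make(chan minio.ObjectInfo)
	go func() {
		defer close(versions)
		for info := range c.ListObjects(context.Background(), bucket,
			minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
			if info.Err != nil {
				return
			}
			versions <- info
		}
	}()
	for e := range c.RemoveObjects(context.Background(), bucket, versions, minio.RemoveObjectsOptions{}) {
		if e.Err != nil {
			return e.Err
		}
	}
	return nil
}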
1866
1867func testObjectTaggingWithVersioning() {
1868 // initialize logging params
1869 startTime := time.Now()
1870 testName := getFuncName()
	function := "{Get,Put,Remove}ObjectTagging()"
1872 args := map[string]interface{}{}
1873
1874 // Seed random based on current time.
1875 rand.Seed(time.Now().Unix())
1876
1877 // Instantiate new minio client object.
1878 c, err := minio.New(os.Getenv(serverEndpoint),
1879 &minio.Options{
1880 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1881 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1882 })
1883 if err != nil {
1884 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1885 return
1886 }
1887
1888 // Enable tracing, write to stderr.
1889 // c.TraceOn(os.Stderr)
1890
1891 // Set user agent.
1892 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1893
1894 // Generate a new random bucket name.
1895 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1896 args["bucketName"] = bucketName
1897
1898 // Make a new bucket.
1899 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1900 if err != nil {
1901 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1902 return
1903 }
1904
1905 err = c.EnableVersioning(context.Background(), bucketName)
1906 if err != nil {
1907 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1908 return
1909 }
1910
1911 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1912 args["objectName"] = objectName
1913
1914 for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
1915 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
1916 if err != nil {
1917 logError(testName, function, args, startTime, "", "PutObject failed", err)
1918 return
1919 }
1920 }
1921
1922 versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1923
1924 var versions []minio.ObjectInfo
1925 for info := range versionsInfo {
1926 if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
1928 return
1929 }
1930 versions = append(versions, info)
1931 }
1932
1933 sort.SliceStable(versions, func(i, j int) bool {
1934 return versions[i].Size < versions[j].Size
1935 })
1936
1937 tagsV1 := map[string]string{"key1": "val1"}
1938 t1, err := tags.MapToObjectTags(tagsV1)
1939 if err != nil {
		logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err)
1941 return
1942 }
1943
1944 err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
1945 if err != nil {
1946 logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
1947 return
1948 }
1949
1950 tagsV2 := map[string]string{"key2": "val2"}
1951 t2, err := tags.MapToObjectTags(tagsV2)
1952 if err != nil {
		logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err)
1954 return
1955 }
1956
1957 err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
1958 if err != nil {
1959 logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
1960 return
1961 }
1962
	tagsEqual := func(tags1, tags2 map[string]string) bool {
		if len(tags1) != len(tags2) {
			return false
		}
		for k1, v1 := range tags1 {
			v2, found := tags2[k1]
			if !found || v1 != v2 {
				return false
			}
		}
		return true
	}
1974
1975 gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
1976 if err != nil {
1977 logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
1978 return
1979 }
1980
1981 if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
1982 logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err)
1983 return
1984 }
1985
1986 gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
1987 if err != nil {
		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
1989 return
1990 }
1991
1992 if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
1993 logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
1994 return
1995 }
1996
1997 err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
1998 if err != nil {
		logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err)
2000 return
2001 }
2002
2003 emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName,
2004 minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
2005 if err != nil {
2006 logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
2007 return
2008 }
2009
2010 if len(emptyTags.ToMap()) != 0 {
		logError(testName, function, args, startTime, "", "Expected empty tags after RemoveObjectTagging", nil)
2012 return
2013 }
2014
	// Delete all objects and their versions, as well as the bucket itself
2016 if err = cleanupVersionedBucket(bucketName, c); err != nil {
2017 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
2018 return
2019 }
2020
2021 successLogger(testName, function, args, startTime).Info()
2022}
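
// A minimal sketch (hypothetical helper) of a version-scoped tagging round
// trip with the tags helper package, assuming a versioned bucket.
func tagVersionSketch(c *minio.Client, bucket, object, versionID string) (map[string]string, error) {
	t, err := tags.MapToObjectTags(map[string]string{"class": "archive"})
	if err != nil {
		return nil, err
	}
	err = c.PutObjectTagging(context.Background(), bucket, object, t,
		minio.PutObjectTaggingOptions{VersionID: versionID})
	if err != nil {
		return nil, err
	}
	got, err := c.GetObjectTagging(context.Background(), bucket, object,
		minio.GetObjectTaggingOptions{VersionID: versionID})
	if err != nil {
		return nil, err
	}
	return got.ToMap(), nil
}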
2023
2024// Test PutObject with custom checksums.
2025func testPutObjectWithChecksums() {
2026 // initialize logging params
2027 startTime := time.Now()
2028 testName := getFuncName()
2029 function := "PutObject(bucketName, objectName, reader,size, opts)"
2030 args := map[string]interface{}{
2031 "bucketName": "",
2032 "objectName": "",
2033 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2034 }
2035
2036 if !isFullMode() {
2037 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2038 return
2039 }
2040
2041 // Seed random based on current time.
2042 rand.Seed(time.Now().Unix())
2043
2044 // Instantiate new minio client object.
2045 c, err := minio.New(os.Getenv(serverEndpoint),
2046 &minio.Options{
2047 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2048 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2049 })
2050 if err != nil {
2051 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2052 return
2053 }
2054
2055 // Enable tracing, write to stderr.
2056 // c.TraceOn(os.Stderr)
2057
2058 // Set user agent.
2059 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2060
2061 // Generate a new random bucket name.
2062 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2063 args["bucketName"] = bucketName
2064
2065 // Make a new bucket.
2066 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2067 if err != nil {
2068 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2069 return
2070 }
2071
2072 defer cleanupBucket(bucketName, c)
2073 tests := []struct {
2074 header string
2075 hasher hash.Hash
2076
2077 // Checksum values
2078 ChecksumCRC32 string
2079 ChecksumCRC32C string
2080 ChecksumSHA1 string
2081 ChecksumSHA256 string
2082 }{
2083 {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
2084 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
2085 {header: "x-amz-checksum-sha1", hasher: sha1.New()},
2086 {header: "x-amz-checksum-sha256", hasher: sha256.New()},
2087 }
2088
2089 for i, test := range tests {
2090 bufSize := dataFileMap["datafile-10-kB"]
2091
2092 // Save the data
2093 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2094 args["objectName"] = objectName
2095
2096 cmpChecksum := func(got, want string) {
2097 if want != got {
2098 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2099 return
2100 }
2101 }
2102
2103 meta := map[string]string{}
2104 reader := getDataReader("datafile-10-kB")
2105 b, err := io.ReadAll(reader)
2106 if err != nil {
2107 logError(testName, function, args, startTime, "", "Read failed", err)
2108 return
2109 }
2110 h := test.hasher
2111 h.Reset()
		// Set a deliberately wrong checksum: the digest of zero bytes written.
2113 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2114 args["metadata"] = meta
2115 args["range"] = "false"
2116
2117 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2118 DisableMultipart: true,
2119 UserMetadata: meta,
2120 })
		if err == nil {
			if i == 0 && resp.ChecksumCRC32 == "" {
				ignoredLog(testName, function, args, startTime, "Checksums do not appear to be supported by the backend").Info()
				return
			}
			logError(testName, function, args, startTime, "", "PutObject succeeded despite a wrong checksum", nil)
			return
		}
2129
2130 // Set correct CRC.
2131 h.Write(b)
2132 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2133 reader.Close()
2134
2135 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2136 DisableMultipart: true,
2137 DisableContentSha256: true,
2138 UserMetadata: meta,
2139 })
2140 if err != nil {
2141 logError(testName, function, args, startTime, "", "PutObject failed", err)
2142 return
2143 }
2144 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2145 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2146 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2147 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2148
2149 // Read the data back
2150 gopts := minio.GetObjectOptions{Checksum: true}
2151
2152 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2153 if err != nil {
2154 logError(testName, function, args, startTime, "", "GetObject failed", err)
2155 return
2156 }
2157
2158 st, err := r.Stat()
2159 if err != nil {
2160 logError(testName, function, args, startTime, "", "Stat failed", err)
2161 return
2162 }
2163 cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2164 cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2165 cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2166 cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2167
		if st.Size != int64(bufSize) {
			logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
			return
		}
2172
2173 if err := r.Close(); err != nil {
2174 logError(testName, function, args, startTime, "", "Object Close failed", err)
2175 return
2176 }
2177 if err := r.Close(); err == nil {
2178 logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
2179 return
2180 }
2181
2182 args["range"] = "true"
2183 err = gopts.SetRange(100, 1000)
2184 if err != nil {
2185 logError(testName, function, args, startTime, "", "SetRange failed", err)
2186 return
2187 }
2188 r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
2189 if err != nil {
2190 logError(testName, function, args, startTime, "", "GetObject failed", err)
2191 return
2192 }
2193
2194 b, err = io.ReadAll(r)
2195 if err != nil {
2196 logError(testName, function, args, startTime, "", "Read failed", err)
2197 return
2198 }
2199 st, err = r.Stat()
2200 if err != nil {
2201 logError(testName, function, args, startTime, "", "Stat failed", err)
2202 return
2203 }
2204
2205 // Range requests should return empty checksums...
2206 cmpChecksum(st.ChecksumSHA256, "")
2207 cmpChecksum(st.ChecksumSHA1, "")
2208 cmpChecksum(st.ChecksumCRC32, "")
2209 cmpChecksum(st.ChecksumCRC32C, "")
2210
2211 delete(args, "range")
2212 delete(args, "metadata")
2213 }
2214
2215 successLogger(testName, function, args, startTime).Info()
2216}
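
// A minimal sketch of how the x-amz-checksum-* header values above are
// produced: the raw digest bytes of the payload, base64-encoded.
func encodeChecksumSketch(h hash.Hash, b []byte) string {
	h.Reset()
	h.Write(b)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}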
2217
// Test multipart PutObject with part checksums.
2219func testPutMultipartObjectWithChecksums() {
2220 // initialize logging params
2221 startTime := time.Now()
2222 testName := getFuncName()
2223 function := "PutObject(bucketName, objectName, reader,size, opts)"
2224 args := map[string]interface{}{
2225 "bucketName": "",
2226 "objectName": "",
2227 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2228 }
2229
2230 if !isFullMode() {
2231 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2232 return
2233 }
2234
2235 // Seed random based on current time.
2236 rand.Seed(time.Now().Unix())
2237
2238 // Instantiate new minio client object.
2239 c, err := minio.New(os.Getenv(serverEndpoint),
2240 &minio.Options{
2241 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2242 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2243 })
2244 if err != nil {
2245 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2246 return
2247 }
2248
2249 // Enable tracing, write to stderr.
2250 // c.TraceOn(os.Stderr)
2251
2252 // Set user agent.
2253 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2254
2255 // Generate a new random bucket name.
2256 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2257 args["bucketName"] = bucketName
2258
2259 // Make a new bucket.
2260 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2261 if err != nil {
2262 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2263 return
2264 }
2265
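	// hashMultiPart reproduces the S3 composite ("checksum of checksums")
	// format for multipart uploads: each partSize chunk is hashed on its
	// own, the per-part digests are concatenated and hashed again, and the
	// result is rendered as "<base64(digest)>-<partCount>".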
2266 hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
2267 r := bytes.NewReader(b)
2268 tmp := make([]byte, partSize)
2269 parts := 0
2270 var all []byte
2271 for {
2272 n, err := io.ReadFull(r, tmp)
			if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
				logError(testName, function, args, startTime, "", "Calc crc failed", err)
				return ""
			}
2276 if n == 0 {
2277 break
2278 }
2279 parts++
2280 hasher.Reset()
2281 hasher.Write(tmp[:n])
2282 all = append(all, hasher.Sum(nil)...)
2283 if err != nil {
2284 break
2285 }
2286 }
2287 hasher.Reset()
2288 hasher.Write(all)
2289 return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
2290 }
2291 defer cleanupBucket(bucketName, c)
2292 tests := []struct {
2293 header string
2294 hasher hash.Hash
2295
2296 // Checksum values
2297 ChecksumCRC32 string
2298 ChecksumCRC32C string
2299 ChecksumSHA1 string
2300 ChecksumSHA256 string
2301 }{
2302 // Currently there is no way to override the checksum type.
2303 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
2304 }
2305
2306 for _, test := range tests {
2307 bufSize := dataFileMap["datafile-129-MB"]
2308
2309 // Save the data
2310 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2311 args["objectName"] = objectName
2312
2313 cmpChecksum := func(got, want string) {
2314 if want != got {
				logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2317 return
2318 }
2319 }
2320
2321 const partSize = 10 << 20
2322 reader := getDataReader("datafile-129-MB")
2323 b, err := io.ReadAll(reader)
2324 if err != nil {
2325 logError(testName, function, args, startTime, "", "Read failed", err)
2326 return
2327 }
2328 reader.Close()
2329 h := test.hasher
2330 h.Reset()
2331 test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
2332
2333 // Set correct CRC.
2334
2335 resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
2336 DisableContentSha256: true,
2337 DisableMultipart: false,
2338 UserMetadata: nil,
2339 PartSize: partSize,
2340 })
2341 if err != nil {
2342 logError(testName, function, args, startTime, "", "PutObject failed", err)
2343 return
2344 }
2345 cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
2346 cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
2347 cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
2348 cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
2349
2350 // Read the data back
2351 gopts := minio.GetObjectOptions{Checksum: true}
2352 gopts.PartNumber = 2
2353
2354 // We cannot use StatObject, since it ignores partnumber.
2355 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2356 if err != nil {
2357 logError(testName, function, args, startTime, "", "GetObject failed", err)
2358 return
2359 }
2360 io.Copy(io.Discard, r)
2361 st, err := r.Stat()
2362 if err != nil {
2363 logError(testName, function, args, startTime, "", "Stat failed", err)
2364 return
2365 }
2366
2367 // Test part 2 checksum...
2368 h.Reset()
2369 h.Write(b[partSize : 2*partSize])
2370 got := base64.StdEncoding.EncodeToString(h.Sum(nil))
2371 if test.ChecksumSHA256 != "" {
2372 cmpChecksum(st.ChecksumSHA256, got)
2373 }
2374 if test.ChecksumSHA1 != "" {
2375 cmpChecksum(st.ChecksumSHA1, got)
2376 }
2377 if test.ChecksumCRC32 != "" {
2378 cmpChecksum(st.ChecksumCRC32, got)
2379 }
2380 if test.ChecksumCRC32C != "" {
2381 cmpChecksum(st.ChecksumCRC32C, got)
2382 }
2383
2384 delete(args, "metadata")
2385 }
2386
2387 successLogger(testName, function, args, startTime).Info()
2388}
2389
2390// Test PutObject with trailing checksums.
2391func testTrailingChecksums() {
2392 // initialize logging params
2393 startTime := time.Now()
2394 testName := getFuncName()
2395 function := "PutObject(bucketName, objectName, reader,size, opts)"
2396 args := map[string]interface{}{
2397 "bucketName": "",
2398 "objectName": "",
2399 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2400 }
2401
2402 if !isFullMode() {
2403 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2404 return
2405 }
2406
2407 // Instantiate new minio client object.
2408 c, err := minio.New(os.Getenv(serverEndpoint),
2409 &minio.Options{
2410 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2411 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2412 TrailingHeaders: true,
2413 })
2414 if err != nil {
2415 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2416 return
2417 }
2418
2419 // Enable tracing, write to stderr.
2420 // c.TraceOn(os.Stderr)
2421
2422 // Set user agent.
2423 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2424
2425 // Generate a new random bucket name.
2426 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2427 args["bucketName"] = bucketName
2428
2429 // Make a new bucket.
2430 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2431 if err != nil {
2432 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2433 return
2434 }
2435
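	// Same helper as in testPutMultipartObjectWithChecksums: reproduces the
	// S3 composite "<base64(digest)>-<partCount>" checksum-of-checksums
	// format for multipart uploads.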
2436 hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
2437 r := bytes.NewReader(b)
2438 tmp := make([]byte, partSize)
2439 parts := 0
2440 var all []byte
2441 for {
2442 n, err := io.ReadFull(r, tmp)
			if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
				logError(testName, function, args, startTime, "", "Calc crc failed", err)
				return ""
			}
2446 if n == 0 {
2447 break
2448 }
2449 parts++
2450 hasher.Reset()
2451 hasher.Write(tmp[:n])
2452 all = append(all, hasher.Sum(nil)...)
2453 if err != nil {
2454 break
2455 }
2456 }
2457 hasher.Reset()
2458 hasher.Write(all)
2459 return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
2460 }
2461 defer cleanupBucket(bucketName, c)
2462 tests := []struct {
2463 header string
2464 hasher hash.Hash
2465
2466 // Checksum values
2467 ChecksumCRC32 string
2468 ChecksumCRC32C string
2469 ChecksumSHA1 string
2470 ChecksumSHA256 string
2471 PO minio.PutObjectOptions
2472 }{
2473 // Currently there is no way to override the checksum type.
2474 {
2475 header: "x-amz-checksum-crc32c",
2476 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2477 ChecksumCRC32C: "set",
2478 PO: minio.PutObjectOptions{
2479 DisableContentSha256: true,
2480 DisableMultipart: false,
2481 UserMetadata: nil,
2482 PartSize: 5 << 20,
2483 },
2484 },
2485 {
2486 header: "x-amz-checksum-crc32c",
2487 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2488 ChecksumCRC32C: "set",
2489 PO: minio.PutObjectOptions{
2490 DisableContentSha256: true,
2491 DisableMultipart: false,
2492 UserMetadata: nil,
2493 PartSize: 6_645_654, // Rather arbitrary size
2494 },
2495 },
2496 {
2497 header: "x-amz-checksum-crc32c",
2498 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2499 ChecksumCRC32C: "set",
2500 PO: minio.PutObjectOptions{
2501 DisableContentSha256: false,
2502 DisableMultipart: false,
2503 UserMetadata: nil,
2504 PartSize: 5 << 20,
2505 },
2506 },
2507 {
2508 header: "x-amz-checksum-crc32c",
2509 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2510 ChecksumCRC32C: "set",
2511 PO: minio.PutObjectOptions{
2512 DisableContentSha256: false,
2513 DisableMultipart: false,
2514 UserMetadata: nil,
2515 PartSize: 6_645_654, // Rather arbitrary size
2516 },
2517 },
2518 }
2519
2520 for _, test := range tests {
2521 bufSize := dataFileMap["datafile-11-MB"]
2522
2523 // Save the data
2524 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2525 args["objectName"] = objectName
2526
2527 cmpChecksum := func(got, want string) {
2528 if want != got {
2529 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got))
2530 return
2531 }
2532 }
2533
2534 reader := getDataReader("datafile-11-MB")
2535 b, err := io.ReadAll(reader)
2536 if err != nil {
2537 logError(testName, function, args, startTime, "", "Read failed", err)
2538 return
2539 }
2540 reader.Close()
2541 h := test.hasher
2542 h.Reset()
2543 test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
2544
2545 // Set correct CRC.
2546 // c.TraceOn(os.Stderr)
2547 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
2548 if err != nil {
2549 logError(testName, function, args, startTime, "", "PutObject failed", err)
2550 return
2551 }
2552 // c.TraceOff()
2553 cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
2554 cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
2555 cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
2556 cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
2557
2558 // Read the data back
2559 gopts := minio.GetObjectOptions{Checksum: true}
2560 gopts.PartNumber = 2
2561
2562 // We cannot use StatObject, since it ignores partnumber.
2563 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2564 if err != nil {
2565 logError(testName, function, args, startTime, "", "GetObject failed", err)
2566 return
2567 }
2568 io.Copy(io.Discard, r)
2569 st, err := r.Stat()
2570 if err != nil {
2571 logError(testName, function, args, startTime, "", "Stat failed", err)
2572 return
2573 }
2574
2575 // Test part 2 checksum...
2576 h.Reset()
2577 p2 := b[test.PO.PartSize:]
2578 if len(p2) > int(test.PO.PartSize) {
2579 p2 = p2[:test.PO.PartSize]
2580 }
2581 h.Write(p2)
2582 got := base64.StdEncoding.EncodeToString(h.Sum(nil))
2583 if test.ChecksumSHA256 != "" {
2584 cmpChecksum(st.ChecksumSHA256, got)
2585 }
2586 if test.ChecksumSHA1 != "" {
2587 cmpChecksum(st.ChecksumSHA1, got)
2588 }
2589 if test.ChecksumCRC32 != "" {
2590 cmpChecksum(st.ChecksumCRC32, got)
2591 }
2592 if test.ChecksumCRC32C != "" {
2593 cmpChecksum(st.ChecksumCRC32C, got)
2594 }
2595
2596 delete(args, "metadata")
	}

	successLogger(testName, function, args, startTime).Info()
}
2599
// Test PutObject with automatically computed checksums.
2601func testPutObjectWithAutomaticChecksums() {
2602 // initialize logging params
2603 startTime := time.Now()
2604 testName := getFuncName()
2605 function := "PutObject(bucketName, objectName, reader,size, opts)"
2606 args := map[string]interface{}{
2607 "bucketName": "",
2608 "objectName": "",
2609 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2610 }
2611
2612 if !isFullMode() {
2613 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2614 return
2615 }
2616
2617 // Seed random based on current time.
2618 rand.Seed(time.Now().Unix())
2619
2620 // Instantiate new minio client object.
2621 c, err := minio.New(os.Getenv(serverEndpoint),
2622 &minio.Options{
2623 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2624 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2625 TrailingHeaders: true,
2626 })
2627 if err != nil {
2628 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2629 return
2630 }
2631
2632 // Set user agent.
2633 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2634
2635 // Generate a new random bucket name.
2636 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2637 args["bucketName"] = bucketName
2638
2639 // Make a new bucket.
2640 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2641 if err != nil {
2642 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2643 return
2644 }
2645
2646 defer cleanupBucket(bucketName, c)
2647 tests := []struct {
2648 header string
2649 hasher hash.Hash
2650
2651 // Checksum values
2652 ChecksumCRC32 string
2653 ChecksumCRC32C string
2654 ChecksumSHA1 string
2655 ChecksumSHA256 string
2656 }{
		// The built-in checksum is CRC32C only, and it is added only when neither MD5 nor SHA256 is sent.
2658 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
2659 }
2660
2661 // Enable tracing, write to stderr.
2662 // c.TraceOn(os.Stderr)
2663 // defer c.TraceOff()
2664
2665 for i, test := range tests {
2666 bufSize := dataFileMap["datafile-10-kB"]
2667
2668 // Save the data
2669 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2670 args["objectName"] = objectName
2671
2672 cmpChecksum := func(got, want string) {
2673 if want != got {
2674 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2675 return
2676 }
2677 }
2678
2679 meta := map[string]string{}
2680 reader := getDataReader("datafile-10-kB")
2681 b, err := io.ReadAll(reader)
2682 if err != nil {
2683 logError(testName, function, args, startTime, "", "Read failed", err)
2684 return
2685 }
2686
2687 h := test.hasher
2688 h.Reset()
2689 h.Write(b)
2690 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2691 args["metadata"] = meta
2692
2693 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2694 DisableMultipart: true,
2695 UserMetadata: nil,
2696 DisableContentSha256: true,
2697 SendContentMd5: false,
2698 })
2699 if err == nil {
2700 if i == 0 && resp.ChecksumCRC32C == "" {
				ignoredLog(testName, function, args, startTime, "Checksums do not appear to be supported by the backend").Info()
2702 return
2703 }
2704 } else {
2705 logError(testName, function, args, startTime, "", "PutObject failed", err)
2706 return
2707 }
2708 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2709 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2710 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2711 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2712
2713 // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent.
2714 // When/if we add a checksum control to PutObjectOptions this will make more sense.
2715 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2716 DisableMultipart: true,
2717 UserMetadata: nil,
2718 DisableContentSha256: false,
2719 SendContentMd5: false,
2720 })
2721 if err != nil {
2722 logError(testName, function, args, startTime, "", "PutObject failed", err)
2723 return
2724 }
		// Automatic checksums are not added over plain HTTP, since the payload is already protected by streaming SHA256 signing there.
2726 if mustParseBool(os.Getenv(enableHTTPS)) {
2727 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2728 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2729 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2730 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2731 }
2732
2733 // Set SHA256 header manually
2734 sh256 := sha256.Sum256(b)
2735 meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])}
2736 args["metadata"] = meta
2737 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2738 DisableMultipart: true,
2739 UserMetadata: meta,
2740 DisableContentSha256: true,
2741 SendContentMd5: false,
2742 })
2743 if err != nil {
2744 logError(testName, function, args, startTime, "", "PutObject failed", err)
2745 return
2746 }
2747 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2748 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2749 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2750 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2751 delete(args, "metadata")
2752 }
2753
2754 successLogger(testName, function, args, startTime).Info()
2755}
2756
// Test PutObject using large data to trigger the multipart ReadAt path
2758func testPutObjectWithMetadata() {
2759 // initialize logging params
2760 startTime := time.Now()
2761 testName := getFuncName()
2762 function := "PutObject(bucketName, objectName, reader,size, opts)"
2763 args := map[string]interface{}{
2764 "bucketName": "",
2765 "objectName": "",
2766 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2767 }
2768
2769 if !isFullMode() {
2770 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2771 return
2772 }
2773
2774 // Seed random based on current time.
2775 rand.Seed(time.Now().Unix())
2776
2777 // Instantiate new minio client object.
2778 c, err := minio.New(os.Getenv(serverEndpoint),
2779 &minio.Options{
2780 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2781 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2782 })
2783 if err != nil {
2784 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2785 return
2786 }
2787
2788 // Enable tracing, write to stderr.
2789 // c.TraceOn(os.Stderr)
2790
2791 // Set user agent.
2792 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2793
2794 // Generate a new random bucket name.
2795 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2796 args["bucketName"] = bucketName
2797
2798 // Make a new bucket.
2799 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2800 if err != nil {
2801 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2802 return
2803 }
2804
2805 defer cleanupBucket(bucketName, c)
2806
2807 bufSize := dataFileMap["datafile-129-MB"]
2808 reader := getDataReader("datafile-129-MB")
2809 defer reader.Close()
2810
2811 // Save the data
2812 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2813 args["objectName"] = objectName
2814
2815 // Object custom metadata
2816 customContentType := "custom/contenttype"
2817
2818 args["metadata"] = map[string][]string{
2819 "Content-Type": {customContentType},
2820 "X-Amz-Meta-CustomKey": {"extra spaces in value"},
2821 }
2822
2823 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
2824 ContentType: customContentType,
2825 })
2826 if err != nil {
2827 logError(testName, function, args, startTime, "", "PutObject failed", err)
2828 return
2829 }
2830
2831 // Read the data back
2832 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
2833 if err != nil {
2834 logError(testName, function, args, startTime, "", "GetObject failed", err)
2835 return
2836 }
2837
2838 st, err := r.Stat()
2839 if err != nil {
2840 logError(testName, function, args, startTime, "", "Stat failed", err)
2841 return
2842 }
	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
		return
	}
2847 if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
2848 logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
2849 return
2850 }
2851 if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
2852 logError(testName, function, args, startTime, "", "data CRC check failed", err)
2853 return
2854 }
2855 if err := r.Close(); err != nil {
2856 logError(testName, function, args, startTime, "", "Object Close failed", err)
2857 return
2858 }
2859 if err := r.Close(); err == nil {
2860 logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
2861 return
2862 }
2863
2864 successLogger(testName, function, args, startTime).Info()
2865}
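
// A minimal sketch (hypothetical helper) of attaching user metadata at
// upload time; the test above only sets a custom Content-Type. Servers
// return these keys prefixed with X-Amz-Meta- on HEAD/GET.
func putUserMetadataSketch(c *minio.Client, bucket, object string, data []byte) error {
	_, err := c.PutObject(context.Background(), bucket, object,
		bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{
			ContentType:  "custom/contenttype",
			UserMetadata: map[string]string{"CustomKey": "extra spaces in value"},
		})
	return err
}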
2866
2867func testPutObjectWithContentLanguage() {
2868 // initialize logging params
2869 objectName := "test-object"
2870 startTime := time.Now()
2871 testName := getFuncName()
2872 function := "PutObject(bucketName, objectName, reader, size, opts)"
2873 args := map[string]interface{}{
2874 "bucketName": "",
2875 "objectName": objectName,
2876 "size": -1,
2877 "opts": "",
2878 }
2879
2880 // Seed random based on current time.
2881 rand.Seed(time.Now().Unix())
2882
2883 // Instantiate new minio client object.
2884 c, err := minio.New(os.Getenv(serverEndpoint),
2885 &minio.Options{
2886 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2887 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2888 })
2889 if err != nil {
2890 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2891 return
2892 }
2893
2894 // Enable tracing, write to stderr.
2895 // c.TraceOn(os.Stderr)
2896
2897 // Set user agent.
2898 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2899
2900 // Generate a new random bucket name.
2901 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2902 args["bucketName"] = bucketName
2903 // Make a new bucket.
2904 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2905 if err != nil {
2906 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
2907 return
2908 }
2909
2910 defer cleanupBucket(bucketName, c)
2911
2912 data := []byte{}
2913 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{
2914 ContentLanguage: "en",
2915 })
2916 if err != nil {
2917 logError(testName, function, args, startTime, "", "PutObject failed", err)
2918 return
2919 }
2920
2921 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
2922 if err != nil {
2923 logError(testName, function, args, startTime, "", "StatObject failed", err)
2924 return
2925 }
2926
2927 if objInfo.Metadata.Get("Content-Language") != "en" {
		logError(testName, function, args, startTime, "", "Content-Language returned by StatObject does not match the expected 'en'", err)
2929 return
2930 }
2931
2932 successLogger(testName, function, args, startTime).Info()
2933}
2934
2935// Test put object with streaming signature.
2936func testPutObjectStreaming() {
2937 // initialize logging params
2938 objectName := "test-object"
2939 startTime := time.Now()
2940 testName := getFuncName()
2941 function := "PutObject(bucketName, objectName, reader,size,opts)"
2942 args := map[string]interface{}{
2943 "bucketName": "",
2944 "objectName": objectName,
2945 "size": -1,
2946 "opts": "",
2947 }
2948
2949 // Seed random based on current time.
2950 rand.Seed(time.Now().Unix())
2951
2952 // Instantiate new minio client object.
2953 c, err := minio.New(os.Getenv(serverEndpoint),
2954 &minio.Options{
2955 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2956 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2957 })
2958 if err != nil {
2959 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2960 return
2961 }
2962
2963 // Enable tracing, write to stderr.
2964 // c.TraceOn(os.Stderr)
2965
2966 // Set user agent.
2967 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2968
2969 // Generate a new random bucket name.
2970 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2971 args["bucketName"] = bucketName
2972 // Make a new bucket.
2973 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2974 if err != nil {
2975 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
2976 return
2977 }
2978
2979 defer cleanupBucket(bucketName, c)
2980
2981 // Upload an object.
2982 sizes := []int64{0, 64*1024 - 1, 64 * 1024}
2983
2984 for _, size := range sizes {
2985 data := newRandomReader(size, size)
2986 ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{})
2987 if err != nil {
2988 logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
2989 return
2990 }
2991
2992 if ui.Size != size {
2993 logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil)
2994 return
2995 }
2996
2997 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
2998 if err != nil {
2999 logError(testName, function, args, startTime, "", "StatObject failed", err)
3000 return
3001 }
3002 if objInfo.Size != size {
3003 logError(testName, function, args, startTime, "", "Unexpected size", err)
3004 return
3005 }
3006
3007 }
3008
3009 successLogger(testName, function, args, startTime).Info()
3010}
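
// A minimal sketch of a streaming upload with unknown length: a size of -1
// makes the client stream the reader (multipart for large inputs) using the
// streaming signature instead of requiring the length up front.
func putStreamingSketch(c *minio.Client, bucket, object string, r io.Reader) error {
	_, err := c.PutObject(context.Background(), bucket, object, r, -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	return err
}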
3011
3012// Test get object seeker from the end, using whence set to '2'.
3013func testGetObjectSeekEnd() {
3014 // initialize logging params
3015 startTime := time.Now()
3016 testName := getFuncName()
3017 function := "GetObject(bucketName, objectName)"
3018 args := map[string]interface{}{}
3019
3020 // Seed random based on current time.
3021 rand.Seed(time.Now().Unix())
3022
3023 // Instantiate new minio client object.
3024 c, err := minio.New(os.Getenv(serverEndpoint),
3025 &minio.Options{
3026 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3027 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3028 })
3029 if err != nil {
3030 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3031 return
3032 }
3033
3034 // Enable tracing, write to stderr.
3035 // c.TraceOn(os.Stderr)
3036
3037 // Set user agent.
3038 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3039
3040 // Generate a new random bucket name.
3041 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3042 args["bucketName"] = bucketName
3043
3044 // Make a new bucket.
3045 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3046 if err != nil {
3047 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3048 return
3049 }
3050
3051 defer cleanupBucket(bucketName, c)
3052
3053 // Generate 33K of data.
3054 bufSize := dataFileMap["datafile-33-kB"]
3055 reader := getDataReader("datafile-33-kB")
3056 defer reader.Close()
3057
3058 // Save the data
3059 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
3060 args["objectName"] = objectName
3061
3062 buf, err := io.ReadAll(reader)
3063 if err != nil {
3064 logError(testName, function, args, startTime, "", "ReadAll failed", err)
3065 return
3066 }
3067
3068 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
3069 if err != nil {
3070 logError(testName, function, args, startTime, "", "PutObject failed", err)
3071 return
3072 }
3073
3074 // Read the data back
3075 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3076 if err != nil {
3077 logError(testName, function, args, startTime, "", "GetObject failed", err)
3078 return
3079 }
3080
3081 st, err := r.Stat()
3082 if err != nil {
3083 logError(testName, function, args, startTime, "", "Stat failed", err)
3084 return
3085 }
3086
	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes read does not match, expected %d got %d", bufSize, st.Size), err)
		return
	}
3091
3092 pos, err := r.Seek(-100, 2)
3093 if err != nil {
3094 logError(testName, function, args, startTime, "", "Object Seek failed", err)
3095 return
3096 }
3097 if pos != st.Size-100 {
3098 logError(testName, function, args, startTime, "", "Incorrect position", err)
3099 return
3100 }
3101 buf2 := make([]byte, 100)
3102 m, err := readFull(r, buf2)
3103 if err != nil {
3104 logError(testName, function, args, startTime, "", "Error reading through readFull", err)
3105 return
3106 }
	if m != len(buf2) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err)
		return
	}
3111 hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
3112 hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
3113 if hexBuf1 != hexBuf2 {
		logError(testName, function, args, startTime, "", "Values at same index don't match", err)
3115 return
3116 }
3117 pos, err = r.Seek(-100, 2)
3118 if err != nil {
3119 logError(testName, function, args, startTime, "", "Object Seek failed", err)
3120 return
3121 }
3122 if pos != st.Size-100 {
3123 logError(testName, function, args, startTime, "", "Incorrect position", err)
3124 return
3125 }
3126 if err = r.Close(); err != nil {
3127 logError(testName, function, args, startTime, "", "ObjectClose failed", err)
3128 return
3129 }
3130
3131 successLogger(testName, function, args, startTime).Info()
3132}
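
// A minimal sketch (hypothetical helper) of reading the last n bytes of an
// object through the seekable reader returned by GetObject; the whence
// value 2 used in the test is the same constant as io.SeekEnd.
func readTailSketch(r *minio.Object, n int64) ([]byte, error) {
	if _, err := r.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	_, err := io.ReadFull(r, buf)
	return buf, err
}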
3133
3134// Test get object reader to not throw error on being closed twice.
3135func testGetObjectClosedTwice() {
3136 // initialize logging params
3137 startTime := time.Now()
3138 testName := getFuncName()
3139 function := "GetObject(bucketName, objectName)"
3140 args := map[string]interface{}{}
3141
3142 // Seed random based on current time.
3143 rand.Seed(time.Now().Unix())
3144
3145 // Instantiate new minio client object.
3146 c, err := minio.New(os.Getenv(serverEndpoint),
3147 &minio.Options{
3148 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3149 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3150 })
3151 if err != nil {
3152 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3153 return
3154 }
3155
3156 // Enable tracing, write to stderr.
3157 // c.TraceOn(os.Stderr)
3158
3159 // Set user agent.
3160 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3161
3162 // Generate a new random bucket name.
3163 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3164 args["bucketName"] = bucketName
3165
3166 // Make a new bucket.
3167 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3168 if err != nil {
3169 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3170 return
3171 }
3172
3173 defer cleanupBucket(bucketName, c)
3174
3175 // Generate 33K of data.
3176 bufSize := dataFileMap["datafile-33-kB"]
3177 reader := getDataReader("datafile-33-kB")
3178 defer reader.Close()
3179
3180 // Save the data
3181 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
3182 args["objectName"] = objectName
3183
3184 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
3185 if err != nil {
3186 logError(testName, function, args, startTime, "", "PutObject failed", err)
3187 return
3188 }
3189
3190 // Read the data back
3191 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3192 if err != nil {
3193 logError(testName, function, args, startTime, "", "GetObject failed", err)
3194 return
3195 }
3196
3197 st, err := r.Stat()
3198 if err != nil {
3199 logError(testName, function, args, startTime, "", "Stat failed", err)
3200 return
3201 }
3202 if st.Size != int64(bufSize) {
3203		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+strconv.FormatInt(int64(bufSize), 10)+" got "+strconv.FormatInt(st.Size, 10), err)
3204 return
3205 }
3206 if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
3207 logError(testName, function, args, startTime, "", "data CRC check failed", err)
3208 return
3209 }
3210 if err := r.Close(); err != nil {
3211 logError(testName, function, args, startTime, "", "Object Close failed", err)
3212 return
3213 }
3214 if err := r.Close(); err == nil {
3215		logError(testName, function, args, startTime, "", "Closing an already closed object returned no error", err)
3216 return
3217 }
3218
3219 successLogger(testName, function, args, startTime).Info()
3220}
3221
3222// Test RemoveObjects request where context cancels after timeout
3223func testRemoveObjectsContext() {
3224 // Initialize logging params.
3225 startTime := time.Now()
3226 testName := getFuncName()
3227 function := "RemoveObjects(ctx, bucketName, objectsCh)"
3228 args := map[string]interface{}{
3229 "bucketName": "",
3230 }
3231
3232	// Seed random based on current time.
3233 rand.Seed(time.Now().Unix())
3234
3235 // Instantiate new minio client.
3236 c, err := minio.New(os.Getenv(serverEndpoint),
3237 &minio.Options{
3238 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3239 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3240 })
3241 if err != nil {
3242 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3243 return
3244 }
3245
3246 // Set user agent.
3247 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3248	// Enable tracing, write to stderr.
3249 // c.TraceOn(os.Stderr)
3250
3251 // Generate a new random bucket name.
3252 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3253 args["bucketName"] = bucketName
3254
3255 // Make a new bucket.
3256 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3257 if err != nil {
3258 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3259 return
3260 }
3261
3262 defer cleanupBucket(bucketName, c)
3263
3264 // Generate put data.
3265 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3266
3267 // Multi remove of 20 objects.
3268 nrObjects := 20
3269 objectsCh := make(chan minio.ObjectInfo)
3270 go func() {
3271 defer close(objectsCh)
3272 for i := 0; i < nrObjects; i++ {
3273 objectName := "sample" + strconv.Itoa(i) + ".txt"
3274 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3275 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3276 if err != nil {
3277 logError(testName, function, args, startTime, "", "PutObject failed", err)
3278 continue
3279 }
3280 objectsCh <- minio.ObjectInfo{
3281 Key: info.Key,
3282 VersionID: info.VersionID,
3283 }
3284 }
3285 }()
3286 // Set context to cancel in 1 nanosecond.
3287 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
3288 args["ctx"] = ctx
3289 defer cancel()
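	// A 1 ns deadline is already in the past by the time RemoveObjects runs,
	// so the call below is expected to fail with a context error.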
3290
3291 // Call RemoveObjects API with short timeout.
3292 errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
3293 // Check for error.
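	// The receive below blocks until RemoveObjects reports its first result;
	// with the expired context that result should carry a non-nil Err.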
3294 select {
3295 case r := <-errorCh:
3296 if r.Err == nil {
3297 logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err)
3298 return
3299 }
3300 }
3301 // Set context with longer timeout.
3302 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
3303 args["ctx"] = ctx
3304 defer cancel()
3305 // Perform RemoveObjects with the longer timeout. Expect the removals to succeed.
3306 errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
3307 select {
3308 case r, more := <-errorCh:
3309 if more || r.Err != nil {
3310 logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
3311 return
3312 }
3313 }
3314
3315 successLogger(testName, function, args, startTime).Info()
3316}
3317
3318// Test removing multiple objects with Remove API
3319func testRemoveMultipleObjects() {
3320 // initialize logging params
3321 startTime := time.Now()
3322 testName := getFuncName()
3323 function := "RemoveObjects(bucketName, objectsCh)"
3324 args := map[string]interface{}{
3325 "bucketName": "",
3326 }
3327
3328 // Seed random based on current time.
3329 rand.Seed(time.Now().Unix())
3330
3331 // Instantiate new minio client object.
3332 c, err := minio.New(os.Getenv(serverEndpoint),
3333 &minio.Options{
3334 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3335 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3336 })
3337 if err != nil {
3338 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3339 return
3340 }
3341
3342 // Set user agent.
3343 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3344
3345	// Enable tracing, write to stderr.
3346 // c.TraceOn(os.Stderr)
3347
3348 // Generate a new random bucket name.
3349 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3350 args["bucketName"] = bucketName
3351
3352 // Make a new bucket.
3353 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3354 if err != nil {
3355 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3356 return
3357 }
3358
3359 defer cleanupBucket(bucketName, c)
3360
3361 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3362
3363	// Multi remove of 200 objects
3364 nrObjects := 200
3365
3366 objectsCh := make(chan minio.ObjectInfo)
3367
3368 go func() {
3369 defer close(objectsCh)
3370 // Upload objects and send them to objectsCh
3371 for i := 0; i < nrObjects; i++ {
3372 objectName := "sample" + strconv.Itoa(i) + ".txt"
3373 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3374 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3375 if err != nil {
3376 logError(testName, function, args, startTime, "", "PutObject failed", err)
3377 continue
3378 }
3379 objectsCh <- minio.ObjectInfo{
3380 Key: info.Key,
3381 VersionID: info.VersionID,
3382 }
3383 }
3384 }()
3385
3386 // Call RemoveObjects API
3387 errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
3388
3389 // Check if errorCh doesn't receive any error
3390 select {
3391 case r, more := <-errorCh:
3392 if more {
3393 logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
3394 return
3395 }
3396 }
3397
3398 successLogger(testName, function, args, startTime).Info()
3399}
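
// exampleRemoveAll is an illustrative sketch (not invoked by the test suite)
// that drains every RemoveObjects result instead of checking only the first
// one, as the single-receive select above does. It assumes a configured
// client and a populated, eventually-closed objectsCh.
func exampleRemoveAll(c *minio.Client, bucketName string, objectsCh chan minio.ObjectInfo) {
	// The returned channel is closed once all deletions have been attempted.
	for e := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
		fmt.Println("failed to remove", e.ObjectName, e.Err)
	}
}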
3400
3401// Test removing multiple objects and check for results
3402func testRemoveMultipleObjectsWithResult() {
3403 // initialize logging params
3404 startTime := time.Now()
3405 testName := getFuncName()
3406 function := "RemoveObjects(bucketName, objectsCh)"
3407 args := map[string]interface{}{
3408 "bucketName": "",
3409 }
3410
3411 // Seed random based on current time.
3412 rand.Seed(time.Now().Unix())
3413
3414 // Instantiate new minio client object.
3415 c, err := minio.New(os.Getenv(serverEndpoint),
3416 &minio.Options{
3417 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3418 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3419 })
3420 if err != nil {
3421 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3422 return
3423 }
3424
3425 // Set user agent.
3426 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3427
3428 // Enable tracing, write to stdout.
3429 // c.TraceOn(os.Stderr)
3430
3431 // Generate a new random bucket name.
3432 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3433 args["bucketName"] = bucketName
3434
3435 // Make a new bucket.
3436 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
3437 if err != nil {
3438 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3439 return
3440 }
3441
3442 defer cleanupVersionedBucket(bucketName, c)
3443
3444 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3445
3446 nrObjects := 10
3447 nrLockedObjects := 5
3448
3449 objectsCh := make(chan minio.ObjectInfo)
3450
3451 go func() {
3452 defer close(objectsCh)
3453 // Upload objects and send them to objectsCh
3454 for i := 0; i < nrObjects; i++ {
3455 objectName := "sample" + strconv.Itoa(i) + ".txt"
3456 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3457 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3458 if err != nil {
3459 logError(testName, function, args, startTime, "", "PutObject failed", err)
3460 return
3461 }
3462 if i < nrLockedObjects {
3463 // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC)
3464 t := time.Now().Add(5 * time.Minute)
3465 m := minio.RetentionMode(minio.Governance)
3466 opts := minio.PutObjectRetentionOptions{
3467 GovernanceBypass: false,
3468 RetainUntilDate: &t,
3469 Mode: &m,
3470 VersionID: info.VersionID,
3471 }
3472 err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
3473 if err != nil {
3474 logError(testName, function, args, startTime, "", "Error setting retention", err)
3475 return
3476 }
3477 }
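			// The retained versions above are protected by governance mode,
			// so their deletes below are expected to fail; the foundErr tally
			// at the end asserts exactly that.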
3478
3479 objectsCh <- minio.ObjectInfo{
3480 Key: info.Key,
3481 VersionID: info.VersionID,
3482 }
3483 }
3484 }()
3485
3486 // Call RemoveObjects API
3487 resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
3488
3489 var foundNil, foundErr int
3490
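	// Drain resultCh until it closes, tallying per-object successes and failures.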
3491 for {
3492 // Check if errorCh doesn't receive any error
3493 select {
3494 case deleteRes, ok := <-resultCh:
3495 if !ok {
3496 goto out
3497 }
3498 if deleteRes.ObjectName == "" {
3499 logError(testName, function, args, startTime, "", "Unexpected object name", nil)
3500 return
3501 }
3502 if deleteRes.ObjectVersionID == "" {
3503 logError(testName, function, args, startTime, "", "Unexpected object version ID", nil)
3504 return
3505 }
3506
3507 if deleteRes.Err == nil {
3508 foundNil++
3509 } else {
3510 foundErr++
3511 }
3512 }
3513 }
3514out:
3515 if foundNil+foundErr != nrObjects {
3516 logError(testName, function, args, startTime, "", "Unexpected number of results", nil)
3517 return
3518 }
3519
3520 if foundNil != nrObjects-nrLockedObjects {
3521 logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil)
3522 return
3523 }
3524
3525 if foundErr != nrLockedObjects {
3526 logError(testName, function, args, startTime, "", "Unexpected number of errors", nil)
3527 return
3528 }
3529
3530 successLogger(testName, function, args, startTime).Info()
3531}
3532
3533// Tests FPutObject of a big file to trigger multipart
3534func testFPutObjectMultipart() {
3535 // initialize logging params
3536 startTime := time.Now()
3537 testName := getFuncName()
3538 function := "FPutObject(bucketName, objectName, fileName, opts)"
3539 args := map[string]interface{}{
3540 "bucketName": "",
3541 "objectName": "",
3542 "fileName": "",
3543 "opts": "",
3544 }
3545
3546 // Seed random based on current time.
3547 rand.Seed(time.Now().Unix())
3548
3549 // Instantiate new minio client object.
3550 c, err := minio.New(os.Getenv(serverEndpoint),
3551 &minio.Options{
3552 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3553 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3554 })
3555 if err != nil {
3556 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3557 return
3558 }
3559
3560 // Enable tracing, write to stderr.
3561 // c.TraceOn(os.Stderr)
3562
3563 // Set user agent.
3564 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3565
3566 // Generate a new random bucket name.
3567 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3568 args["bucketName"] = bucketName
3569
3570 // Make a new bucket.
3571 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3572 if err != nil {
3573 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3574 return
3575 }
3576
3577 defer cleanupBucket(bucketName, c)
3578
3579 // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3580 fileName := getMintDataDirFilePath("datafile-129-MB")
3581 if fileName == "" {
3582		// Make a temp file with 129 MB of data.
3583 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
3584 if err != nil {
3585 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
3586 return
3587 }
3588		// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3589 if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
3590 logError(testName, function, args, startTime, "", "Copy failed", err)
3591 return
3592 }
3593 if err = file.Close(); err != nil {
3594 logError(testName, function, args, startTime, "", "File Close failed", err)
3595 return
3596 }
3597 fileName = file.Name()
3598 args["fileName"] = fileName
3599 }
3600 totalSize := dataFileMap["datafile-129-MB"]
3601 // Set base object name
3602 objectName := bucketName + "FPutObject" + "-standard"
3603 args["objectName"] = objectName
3604
3605 objectContentType := "testapplication/octet-stream"
3606 args["objectContentType"] = objectContentType
3607
3608	// Perform standard FPutObject with the custom contentType provided (servers may normalize it to application/octet-stream).
3609 _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
3610 if err != nil {
3611 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3612 return
3613 }
3614
3615 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3616 if err != nil {
3617 logError(testName, function, args, startTime, "", "GetObject failed", err)
3618 return
3619 }
3620 objInfo, err := r.Stat()
3621 if err != nil {
3622 logError(testName, function, args, startTime, "", "Unexpected error", err)
3623 return
3624 }
3625 if objInfo.Size != int64(totalSize) {
3626		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+strconv.FormatInt(int64(totalSize), 10)+" got "+strconv.FormatInt(objInfo.Size, 10), err)
3627 return
3628 }
3629 if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
3630 logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
3631 return
3632 }
3633
3634 successLogger(testName, function, args, startTime).Info()
3635}
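
// examplePinnedPartSize is an illustrative sketch (not invoked by the test
// suite) of forcing a fixed multipart part size through the PartSize option,
// rather than relying on the automatic selection exercised above. The 16 MiB
// value is only an example.
func examplePinnedPartSize(c *minio.Client, bucketName, objectName, fileName string) error {
	// A PartSize below the file size forces a multipart upload with parts of
	// roughly that size.
	_, err := c.FPutObject(context.Background(), bucketName, objectName, fileName,
		minio.PutObjectOptions{PartSize: 16 * 1024 * 1024})
	return err
}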
3636
3637// Tests FPutObject with null contentType (default = application/octet-stream)
3638func testFPutObject() {
3639 // initialize logging params
3640 startTime := time.Now()
3641 testName := getFuncName()
3642 function := "FPutObject(bucketName, objectName, fileName, opts)"
3643
3644 args := map[string]interface{}{
3645 "bucketName": "",
3646 "objectName": "",
3647 "fileName": "",
3648 "opts": "",
3649 }
3650
3651 // Seed random based on current time.
3652 rand.Seed(time.Now().Unix())
3653
3654 // Instantiate new minio client object.
3655 c, err := minio.New(os.Getenv(serverEndpoint),
3656 &minio.Options{
3657 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3658 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3659 })
3660 if err != nil {
3661 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3662 return
3663 }
3664
3665 // Enable tracing, write to stderr.
3666 // c.TraceOn(os.Stderr)
3667
3668 // Set user agent.
3669 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3670
3671 // Generate a new random bucket name.
3672 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3673 location := "us-east-1"
3674
3675 // Make a new bucket.
3676 args["bucketName"] = bucketName
3677 args["location"] = location
3678 function = "MakeBucket(bucketName, location)"
3679 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
3680 if err != nil {
3681 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3682 return
3683 }
3684
3685 defer cleanupBucket(bucketName, c)
3686
3687	// Upload 3 parts' worth of data to use all 3 multipart 'workers' and have an extra part.
3688	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
3689 fName := getMintDataDirFilePath("datafile-129-MB")
3690 if fName == "" {
3691		// Make a temp file with 129 MB of data.
3692 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
3693 if err != nil {
3694 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
3695 return
3696 }
3697
3698 // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3699 if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
3700 logError(testName, function, args, startTime, "", "File copy failed", err)
3701 return
3702 }
3703		// Close the file proactively for Windows.
3704 if err = file.Close(); err != nil {
3705 logError(testName, function, args, startTime, "", "File close failed", err)
3706 return
3707 }
3708 defer os.Remove(file.Name())
3709 fName = file.Name()
3710 }
3711
3712 // Set base object name
3713 function = "FPutObject(bucketName, objectName, fileName, opts)"
3714 objectName := bucketName + "FPutObject"
3715 args["objectName"] = objectName + "-standard"
3716 args["fileName"] = fName
3717 args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
3718
3719 // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
3720 ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
3721 if err != nil {
3722 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3723 return
3724 }
3725
3726 if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
3727 logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
3728 return
3729 }
3730
3731 // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
3732 args["objectName"] = objectName + "-Octet"
3733 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
3734 if err != nil {
3735		logError(testName, function, args, startTime, "", "FPutObject failed", err)
3736 return
3737 }
3738
3739 srcFile, err := os.Open(fName)
3740 if err != nil {
3741 logError(testName, function, args, startTime, "", "File open failed", err)
3742 return
3743 }
3744 defer srcFile.Close()
3745 // Add extension to temp file name
3746 tmpFile, err := os.Create(fName + ".gtar")
3747 if err != nil {
3748 logError(testName, function, args, startTime, "", "File create failed", err)
3749 return
3750 }
3751 _, err = io.Copy(tmpFile, srcFile)
3752 if err != nil {
3753 logError(testName, function, args, startTime, "", "File copy failed", err)
3754 return
3755 }
3756 tmpFile.Close()
3757
3758 // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
3759 args["objectName"] = objectName + "-GTar"
3760 args["opts"] = minio.PutObjectOptions{}
3761 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
3762 if err != nil {
3763 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3764 return
3765 }
3766
3767 // Check headers
3768 function = "StatObject(bucketName, objectName, opts)"
3769 args["objectName"] = objectName + "-standard"
3770 rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
3771 if err != nil {
3772 logError(testName, function, args, startTime, "", "StatObject failed", err)
3773 return
3774 }
3775 if rStandard.ContentType != "application/octet-stream" {
3776 logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
3777 return
3778 }
3779
3780 function = "StatObject(bucketName, objectName, opts)"
3781 args["objectName"] = objectName + "-Octet"
3782 rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
3783 if err != nil {
3784 logError(testName, function, args, startTime, "", "StatObject failed", err)
3785 return
3786 }
3787 if rOctet.ContentType != "application/octet-stream" {
3788 logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
3789 return
3790 }
3791
3792 function = "StatObject(bucketName, objectName, opts)"
3793 args["objectName"] = objectName + "-GTar"
3794 rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
3795 if err != nil {
3796 logError(testName, function, args, startTime, "", "StatObject failed", err)
3797 return
3798 }
3799 if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
3800		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
3801 return
3802 }
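	// With an empty ContentType, minio-go infers the type from the file
	// extension; servers may report ".gtar" as x-gtar, x-tar or plain
	// octet-stream, hence the three-way check above.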
3803
3804 os.Remove(fName + ".gtar")
3805 successLogger(testName, function, args, startTime).Info()
3806}
3807
3808// Tests FPutObject request when context cancels after timeout
3809func testFPutObjectContext() {
3810 // initialize logging params
3811 startTime := time.Now()
3812 testName := getFuncName()
3813 function := "FPutObject(bucketName, objectName, fileName, opts)"
3814 args := map[string]interface{}{
3815 "bucketName": "",
3816 "objectName": "",
3817 "fileName": "",
3818 "opts": "",
3819 }
3820 // Seed random based on current time.
3821 rand.Seed(time.Now().Unix())
3822
3823 // Instantiate new minio client object.
3824 c, err := minio.New(os.Getenv(serverEndpoint),
3825 &minio.Options{
3826 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3827 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3828 })
3829 if err != nil {
3830 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3831 return
3832 }
3833
3834 // Enable tracing, write to stderr.
3835 // c.TraceOn(os.Stderr)
3836
3837 // Set user agent.
3838 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3839
3840 // Generate a new random bucket name.
3841 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3842 args["bucketName"] = bucketName
3843
3844 // Make a new bucket.
3845 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3846 if err != nil {
3847 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3848 return
3849 }
3850
3851 defer cleanupBucket(bucketName, c)
3852
3853	// Stage 1 MiB of data for the upload.
3854	// The content itself does not matter here; the test only exercises context cancellation.
3855 fName := getMintDataDirFilePath("datafile-1-MB")
3856 if fName == "" {
3857		// Make a temp file with 1 MiB of data.
3858 file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
3859 if err != nil {
3860 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
3861 return
3862 }
3863
3864		// Copy 1 MiB of data into the temp file.
3865 if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
3866 logError(testName, function, args, startTime, "", "File copy failed", err)
3867 return
3868 }
3869		// Close the file proactively for Windows.
3870 if err = file.Close(); err != nil {
3871 logError(testName, function, args, startTime, "", "File close failed", err)
3872 return
3873 }
3874 defer os.Remove(file.Name())
3875 fName = file.Name()
3876 }
3877
3878 // Set base object name
3879 objectName := bucketName + "FPutObjectContext"
3880 args["objectName"] = objectName
3881 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
3882 args["ctx"] = ctx
3883 defer cancel()
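	// The 1 ns deadline is already expired, so the upload below must fail.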
3884
3885 // Perform FPutObject with contentType provided (Expecting application/octet-stream)
3886 _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
3887 if err == nil {
3888 logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
3889 return
3890 }
3891 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
3892 defer cancel()
3893 // Perform FPutObject with a long timeout. Expect the put object to succeed
3894 _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
3895 if err != nil {
3896 logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err)
3897 return
3898 }
3899
3900 _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
3901 if err != nil {
3902 logError(testName, function, args, startTime, "", "StatObject failed", err)
3903 return
3904 }
3905
3906 successLogger(testName, function, args, startTime).Info()
3907}
3908
3909// Tests FPutObject request when context cancels after timeout
3910func testFPutObjectContextV2() {
3911 // initialize logging params
3912 startTime := time.Now()
3913 testName := getFuncName()
3914	function := "FPutObject(ctx, bucketName, objectName, fileName, opts)"
3915 args := map[string]interface{}{
3916 "bucketName": "",
3917 "objectName": "",
3918 "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
3919 }
3920 // Seed random based on current time.
3921 rand.Seed(time.Now().Unix())
3922
3923 // Instantiate new minio client object.
3924 c, err := minio.New(os.Getenv(serverEndpoint),
3925 &minio.Options{
3926 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3927 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3928 })
3929 if err != nil {
3930 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3931 return
3932 }
3933
3934 // Enable tracing, write to stderr.
3935 // c.TraceOn(os.Stderr)
3936
3937 // Set user agent.
3938 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3939
3940 // Generate a new random bucket name.
3941 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3942 args["bucketName"] = bucketName
3943
3944 // Make a new bucket.
3945 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3946 if err != nil {
3947 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3948 return
3949 }
3950
3951 defer cleanupBucket(bucketName, c)
3952
3953	// Stage 1 MiB of data for the upload.
3954	// The content itself does not matter here; the test only exercises context cancellation.
3955 fName := getMintDataDirFilePath("datafile-1-MB")
3956 if fName == "" {
3957		// Make a temp file with 1 MiB of data.
3958 file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
3959 if err != nil {
3960 logError(testName, function, args, startTime, "", "Temp file creation failed", err)
3961 return
3962 }
3963
3964		// Copy 1 MiB of data into the temp file.
3965 if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
3966 logError(testName, function, args, startTime, "", "File copy failed", err)
3967 return
3968 }
3969
3970		// Close the file proactively for Windows.
3971 if err = file.Close(); err != nil {
3972 logError(testName, function, args, startTime, "", "File close failed", err)
3973 return
3974 }
3975 defer os.Remove(file.Name())
3976 fName = file.Name()
3977 }
3978
3979 // Set base object name
3980 objectName := bucketName + "FPutObjectContext"
3981 args["objectName"] = objectName
3982
3983 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
3984 args["ctx"] = ctx
3985 defer cancel()
3986
3987 // Perform FPutObject with contentType provided (Expecting application/octet-stream)
3988 _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
3989 if err == nil {
3990 logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
3991 return
3992 }
3993 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
3994 defer cancel()
3995 // Perform FPutObject with a long timeout. Expect the put object to succeed
3996 _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
3997 if err != nil {
3998 logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err)
3999 return
4000 }
4001
4002 _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
4003 if err != nil {
4004 logError(testName, function, args, startTime, "", "StatObject failed", err)
4005 return
4006 }
4007
4008 successLogger(testName, function, args, startTime).Info()
4009}
4010
4011	// Test validates PutObject with context to see if request cancellation is honored.
4012func testPutObjectContext() {
4013 // initialize logging params
4014 startTime := time.Now()
4015 testName := getFuncName()
4016 function := "PutObject(ctx, bucketName, objectName, fileName, opts)"
4017 args := map[string]interface{}{
4018 "ctx": "",
4019 "bucketName": "",
4020 "objectName": "",
4021 "opts": "",
4022 }
4023
4024 // Instantiate new minio client object.
4025 c, err := minio.New(os.Getenv(serverEndpoint),
4026 &minio.Options{
4027 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4028 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4029 })
4030 if err != nil {
4031 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4032 return
4033 }
4034
4035 // Enable tracing, write to stderr.
4036 // c.TraceOn(os.Stderr)
4037
4038 // Set user agent.
4039 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4040
4041 // Make a new bucket.
4042 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4043 args["bucketName"] = bucketName
4044
4045 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4046 if err != nil {
4047 logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
4048 return
4049 }
4050
4051 defer cleanupBucket(bucketName, c)
4052
4053 bufSize := dataFileMap["datafile-33-kB"]
4054 reader := getDataReader("datafile-33-kB")
4055 defer reader.Close()
4056 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
4057 args["objectName"] = objectName
4058
4059 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
4060 cancel()
4061 args["ctx"] = ctx
4062 args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
4063
4064 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4065 if err == nil {
4066 logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err)
4067 return
4068 }
4069
4070 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
4071 args["ctx"] = ctx
4072
4073 defer cancel()
4074 reader = getDataReader("datafile-33-kB")
4075 defer reader.Close()
4076 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4077 if err != nil {
4078 logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
4079 return
4080 }
4081
4082 successLogger(testName, function, args, startTime).Info()
4083}
4084
4085	// Tests get object with the s3zip extension.
4086func testGetObjectS3Zip() {
4087 // initialize logging params
4088 startTime := time.Now()
4089 testName := getFuncName()
4090 function := "GetObject(bucketName, objectName)"
4091 args := map[string]interface{}{"x-minio-extract": true}
4092
4093 // Seed random based on current time.
4094 rand.Seed(time.Now().Unix())
4095
4096 // Instantiate new minio client object.
4097 c, err := minio.New(os.Getenv(serverEndpoint),
4098 &minio.Options{
4099 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4100 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4101 })
4102 if err != nil {
4103 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4104 return
4105 }
4106
4107 // Enable tracing, write to stderr.
4108 // c.TraceOn(os.Stderr)
4109
4110 // Set user agent.
4111 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4112
4113 // Generate a new random bucket name.
4114 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4115 args["bucketName"] = bucketName
4116
4117 // Make a new bucket.
4118 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4119 if err != nil {
4120 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4121 return
4122 }
4123
4124 defer func() {
4125 // Delete all objects and buckets
4126 if err = cleanupBucket(bucketName, c); err != nil {
4127 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
4128 return
4129 }
4130 }()
4131
4132 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip"
4133 args["objectName"] = objectName
4134
4135 var zipFile bytes.Buffer
4136 zw := zip.NewWriter(&zipFile)
4137 rng := rand.New(rand.NewSource(0xc0cac01a))
4138 const nFiles = 500
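	// Entries 0 through 499 hold i bytes of random data; the final entry is a
	// single 1,000,000-byte zero-filled (highly compressible) file.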
4139 for i := 0; i <= nFiles; i++ {
4140 if i == nFiles {
4141 // Make one large, compressible file.
4142 i = 1000000
4143 }
4144 b := make([]byte, i)
4145 if i < nFiles {
4146 rng.Read(b)
4147 }
4148 wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
4149 if err != nil {
4150 logError(testName, function, args, startTime, "", "zw.Create failed", err)
4151 return
4152 }
4153 wc.Write(b)
4154 }
4155 err = zw.Close()
4156 if err != nil {
4157 logError(testName, function, args, startTime, "", "zw.Close failed", err)
4158 return
4159 }
4160 buf := zipFile.Bytes()
4161
4162 // Save the data
4163 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4164 if err != nil {
4165 logError(testName, function, args, startTime, "", "PutObject failed", err)
4166 return
4167 }
4168
4169 // Read the data back
4170 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4171 if err != nil {
4172 logError(testName, function, args, startTime, "", "GetObject failed", err)
4173 return
4174 }
4175
4176 st, err := r.Stat()
4177 if err != nil {
4178 logError(testName, function, args, startTime, "", "Stat object failed", err)
4179 return
4180 }
4181
4182 if st.Size != int64(len(buf)) {
4183		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+strconv.Itoa(len(buf))+", got "+strconv.FormatInt(st.Size, 10), err)
4184 return
4185 }
4186 r.Close()
4187
4188 zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
4189 if err != nil {
4190 logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
4191 return
4192 }
4193 lOpts := minio.ListObjectsOptions{}
4194 lOpts.Set("x-minio-extract", "true")
4195 lOpts.Prefix = objectName + "/"
4196 lOpts.Recursive = true
4197 list := c.ListObjects(context.Background(), bucketName, lOpts)
4198 listed := map[string]minio.ObjectInfo{}
4199 for item := range list {
4200 if item.Err != nil {
4201 break
4202 }
4203 listed[item.Key] = item
4204 }
4205 if len(listed) == 0 {
4206		// Assume we are running against a non-MinIO server.
4207 args["SKIPPED"] = true
4208 ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info()
4209 return
4210 }
4211
4212 for _, file := range zr.File {
4213 if file.FileInfo().IsDir() {
4214 continue
4215 }
4216 args["zipfile"] = file.Name
4217 zfr, err := file.Open()
4218 if err != nil {
4219 logError(testName, function, args, startTime, "", "file.Open failed", err)
4220 return
4221 }
4222 want, err := io.ReadAll(zfr)
4223 if err != nil {
4224			logError(testName, function, args, startTime, "", "zip file read failed", err)
4225 return
4226 }
4227
4228 opts := minio.GetObjectOptions{}
4229 opts.Set("x-minio-extract", "true")
4230 key := path.Join(objectName, file.Name)
4231 r, err = c.GetObject(context.Background(), bucketName, key, opts)
4232 if err != nil {
4233 terr := minio.ToErrorResponse(err)
4234 if terr.StatusCode != http.StatusNotFound {
4235 logError(testName, function, args, startTime, "", "GetObject failed", err)
4236 }
4237 return
4238 }
4239 got, err := io.ReadAll(r)
4240 if err != nil {
4241 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4242 return
4243 }
4244 r.Close()
4245 if !bytes.Equal(want, got) {
4246 logError(testName, function, args, startTime, "", "Content mismatch", err)
4247 return
4248 }
4249 oi, ok := listed[key]
4250 if !ok {
4251 logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key))
4252 return
4253 }
4254 if int(oi.Size) != len(got) {
4255 logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got)))
4256 return
4257 }
4258 delete(listed, key)
4259 }
4260 delete(args, "zipfile")
4261 if len(listed) > 0 {
4262 logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed))
4263 return
4264 }
4265 successLogger(testName, function, args, startTime).Info()
4266}
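
// exampleZipExtract is an illustrative sketch (not invoked by the test suite)
// of the s3zip read path exercised above: with the x-minio-extract header set,
// GetObject can address a single file inside a zip object. The bucket and key
// names are placeholders.
func exampleZipExtract(c *minio.Client, bucketName, zipObject, innerFile string) (*minio.Object, error) {
	opts := minio.GetObjectOptions{}
	opts.Set("x-minio-extract", "true")
	// The inner file is addressed as "<zip object>/<path inside the zip>".
	return c.GetObject(context.Background(), bucketName, path.Join(zipObject, innerFile), opts)
}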
4267
4268	// Tests get object ReadSeeker interface methods.
4269func testGetObjectReadSeekFunctional() {
4270 // initialize logging params
4271 startTime := time.Now()
4272 testName := getFuncName()
4273 function := "GetObject(bucketName, objectName)"
4274 args := map[string]interface{}{}
4275
4276 // Seed random based on current time.
4277 rand.Seed(time.Now().Unix())
4278
4279 // Instantiate new minio client object.
4280 c, err := minio.New(os.Getenv(serverEndpoint),
4281 &minio.Options{
4282 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4283 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4284 })
4285 if err != nil {
4286 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4287 return
4288 }
4289
4290 // Enable tracing, write to stderr.
4291 // c.TraceOn(os.Stderr)
4292
4293 // Set user agent.
4294 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4295
4296 // Generate a new random bucket name.
4297 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4298 args["bucketName"] = bucketName
4299
4300 // Make a new bucket.
4301 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4302 if err != nil {
4303 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4304 return
4305 }
4306
4307 defer func() {
4308 // Delete all objects and buckets
4309 if err = cleanupBucket(bucketName, c); err != nil {
4310 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
4311 return
4312 }
4313 }()
4314
4315 // Generate 33K of data.
4316 bufSize := dataFileMap["datafile-33-kB"]
4317 reader := getDataReader("datafile-33-kB")
4318 defer reader.Close()
4319
4320 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4321 args["objectName"] = objectName
4322
4323 buf, err := io.ReadAll(reader)
4324 if err != nil {
4325 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4326 return
4327 }
4328
4329 // Save the data
4330 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4331 if err != nil {
4332 logError(testName, function, args, startTime, "", "PutObject failed", err)
4333 return
4334 }
4335
4336 // Read the data back
4337 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4338 if err != nil {
4339 logError(testName, function, args, startTime, "", "GetObject failed", err)
4340 return
4341 }
4342
4343 st, err := r.Stat()
4344 if err != nil {
4345 logError(testName, function, args, startTime, "", "Stat object failed", err)
4346 return
4347 }
4348
4349 if st.Size != int64(bufSize) {
4350		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+strconv.FormatInt(int64(bufSize), 10)+", got "+strconv.FormatInt(st.Size, 10), err)
4351 return
4352 }
4353
4354 // This following function helps us to compare data from the reader after seek
4355 // with the data from the original buffer
4356 cmpData := func(r io.Reader, start, end int) {
4357 if end-start == 0 {
4358 return
4359 }
4360 buffer := bytes.NewBuffer([]byte{})
4361 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
4362 if err != io.EOF {
4363 logError(testName, function, args, startTime, "", "CopyN failed", err)
4364 return
4365 }
4366 }
4367 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
4368 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
4369 return
4370 }
4371 }
4372
4373 // Generic seek error for errors other than io.EOF
4374 seekErr := errors.New("seek error")
4375
4376 testCases := []struct {
4377 offset int64
4378 whence int
4379 pos int64
4380 err error
4381 shouldCmp bool
4382 start int
4383 end int
4384 }{
4385 // Start from offset 0, fetch data and compare
4386 {0, 0, 0, nil, true, 0, 0},
4387 // Start from offset 2048, fetch data and compare
4388 {2048, 0, 2048, nil, true, 2048, bufSize},
4389 // Start from offset larger than possible
4390 {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
4391 // Move to offset 0 without comparing
4392 {0, 0, 0, nil, false, 0, 0},
4393 // Move one step forward and compare
4394 {1, 1, 1, nil, true, 1, bufSize},
4395 // Move larger than possible
4396 {int64(bufSize), 1, 0, seekErr, false, 0, 0},
4397 // Provide negative offset with CUR_SEEK
4398 {int64(-1), 1, 0, seekErr, false, 0, 0},
4399 // Test with whence SEEK_END and with positive offset
4400 {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
4401 // Test with whence SEEK_END and with negative offset
4402 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
4403 // Test with whence SEEK_END and with large negative offset
4404 {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
4405 }
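	// whence values follow the io package constants:
	// 0 == io.SeekStart, 1 == io.SeekCurrent, 2 == io.SeekEnd.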
4406
4407 for i, testCase := range testCases {
4408 // Perform seek operation
4409 n, err := r.Seek(testCase.offset, testCase.whence)
4410 // We expect an error
4411 if testCase.err == seekErr && err == nil {
4412			logError(testName, function, args, startTime, "", "Test "+strconv.Itoa(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: nil", err)
4413 return
4414 }
4415 // We expect a specific error
4416 if testCase.err != seekErr && testCase.err != err {
4417			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
4418 return
4419 }
4420 // If we expect an error go to the next loop
4421 if testCase.err != nil {
4422 continue
4423 }
4424 // Check the returned seek pos
4425 if n != testCase.pos {
4426			logError(testName, function, args, startTime, "", "Test "+strconv.Itoa(i+1)+", number of bytes seeked does not match, expected "+strconv.FormatInt(testCase.pos, 10)+", got "+strconv.FormatInt(n, 10), err)
4427 return
4428 }
4429 // Compare only if shouldCmp is activated
4430 if testCase.shouldCmp {
4431 cmpData(r, testCase.start, testCase.end)
4432 }
4433 }
4434 successLogger(testName, function, args, startTime).Info()
4435}
4436
4437// Tests get object ReaderAt interface methods.
4438func testGetObjectReadAtFunctional() {
4439 // initialize logging params
4440 startTime := time.Now()
4441 testName := getFuncName()
4442 function := "GetObject(bucketName, objectName)"
4443 args := map[string]interface{}{}
4444
4445 // Seed random based on current time.
4446 rand.Seed(time.Now().Unix())
4447
4448 // Instantiate new minio client object.
4449 c, err := minio.New(os.Getenv(serverEndpoint),
4450 &minio.Options{
4451 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4452 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4453 })
4454 if err != nil {
4455 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4456 return
4457 }
4458
4459 // Enable tracing, write to stderr.
4460 // c.TraceOn(os.Stderr)
4461
4462 // Set user agent.
4463 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4464
4465 // Generate a new random bucket name.
4466 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4467 args["bucketName"] = bucketName
4468
4469 // Make a new bucket.
4470 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4471 if err != nil {
4472 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4473 return
4474 }
4475
4476 defer cleanupBucket(bucketName, c)
4477
4478 // Generate 33K of data.
4479 bufSize := dataFileMap["datafile-33-kB"]
4480 reader := getDataReader("datafile-33-kB")
4481 defer reader.Close()
4482
4483 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4484 args["objectName"] = objectName
4485
4486 buf, err := io.ReadAll(reader)
4487 if err != nil {
4488 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4489 return
4490 }
4491
4492 // Save the data
4493 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4494 if err != nil {
4495 logError(testName, function, args, startTime, "", "PutObject failed", err)
4496 return
4497 }
4498
4499	// Read the data back
4500 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4501 if err != nil {
4502		logError(testName, function, args, startTime, "", "GetObject failed", err)
4503 return
4504 }
4505 offset := int64(2048)
4506
4507 // read directly
4508 buf1 := make([]byte, 512)
4509 buf2 := make([]byte, 512)
4510 buf3 := make([]byte, 512)
4511 buf4 := make([]byte, 512)
4512
4513 // Test readAt before stat is called such that objectInfo doesn't change.
4514 m, err := r.ReadAt(buf1, offset)
4515 if err != nil {
4516 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4517 return
4518 }
4519 if m != len(buf1) {
4520		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf1))+", got "+strconv.Itoa(m), err)
4521 return
4522 }
4523 if !bytes.Equal(buf1, buf[offset:offset+512]) {
4524 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4525 return
4526 }
4527 offset += 512
4528
4529 st, err := r.Stat()
4530 if err != nil {
4531 logError(testName, function, args, startTime, "", "Stat failed", err)
4532 return
4533 }
4534
4535 if st.Size != int64(bufSize) {
4536		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+strconv.FormatInt(int64(bufSize), 10)+", got "+strconv.FormatInt(st.Size, 10), err)
4537 return
4538 }
4539
4540 m, err = r.ReadAt(buf2, offset)
4541 if err != nil {
4542 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4543 return
4544 }
4545 if m != len(buf2) {
4546		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf2))+", got "+strconv.Itoa(m), err)
4547 return
4548 }
4549 if !bytes.Equal(buf2, buf[offset:offset+512]) {
4550 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4551 return
4552 }
4553
4554 offset += 512
4555 m, err = r.ReadAt(buf3, offset)
4556 if err != nil {
4557 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4558 return
4559 }
4560 if m != len(buf3) {
4561		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf3))+", got "+strconv.Itoa(m), err)
4562 return
4563 }
4564 if !bytes.Equal(buf3, buf[offset:offset+512]) {
4565 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4566 return
4567 }
4568 offset += 512
4569 m, err = r.ReadAt(buf4, offset)
4570 if err != nil {
4571 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4572 return
4573 }
4574 if m != len(buf4) {
4575		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf4))+", got "+strconv.Itoa(m), err)
4576 return
4577 }
4578 if !bytes.Equal(buf4, buf[offset:offset+512]) {
4579 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4580 return
4581 }
4582
4583 buf5 := make([]byte, len(buf))
4584 // Read the whole object.
4585 m, err = r.ReadAt(buf5, 0)
4586 if err != nil {
4587 if err != io.EOF {
4588 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4589 return
4590 }
4591 }
4592 if m != len(buf5) {
4593		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf5))+", got "+strconv.Itoa(m), err)
4594 return
4595 }
4596 if !bytes.Equal(buf, buf5) {
4597		logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err)
4598 return
4599 }
4600
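	// Note: io.ReaderAt allows ReadAt to return io.EOF alongside n == len(p)
	// when the read ends exactly at the end of the object, which the checks
	// above and below tolerate.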
4601 buf6 := make([]byte, len(buf)+1)
4602 // Read the whole object and beyond.
4603 _, err = r.ReadAt(buf6, 0)
4604 if err != nil {
4605 if err != io.EOF {
4606 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4607 return
4608 }
4609 }
4610
4611 successLogger(testName, function, args, startTime).Info()
4612}
4613
4614// Reproduces issue https://github.com/minio/minio-go/issues/1137
4615func testGetObjectReadAtWhenEOFWasReached() {
4616 // initialize logging params
4617 startTime := time.Now()
4618 testName := getFuncName()
4619 function := "GetObject(bucketName, objectName)"
4620 args := map[string]interface{}{}
4621
4622 // Seed random based on current time.
4623 rand.Seed(time.Now().Unix())
4624
4625 // Instantiate new minio client object.
4626 c, err := minio.New(os.Getenv(serverEndpoint),
4627 &minio.Options{
4628 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4629 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4630 })
4631 if err != nil {
4632 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4633 return
4634 }
4635
4636 // Enable tracing, write to stderr.
4637 // c.TraceOn(os.Stderr)
4638
4639 // Set user agent.
4640 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4641
4642 // Generate a new random bucket name.
4643 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4644 args["bucketName"] = bucketName
4645
4646 // Make a new bucket.
4647 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4648 if err != nil {
4649 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4650 return
4651 }
4652
4653 defer cleanupBucket(bucketName, c)
4654
4655 // Generate 33K of data.
4656 bufSize := dataFileMap["datafile-33-kB"]
4657 reader := getDataReader("datafile-33-kB")
4658 defer reader.Close()
4659
4660 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4661 args["objectName"] = objectName
4662
4663 buf, err := io.ReadAll(reader)
4664 if err != nil {
4665 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4666 return
4667 }
4668
4669 // Save the data
4670 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4671 if err != nil {
4672 logError(testName, function, args, startTime, "", "PutObject failed", err)
4673 return
4674 }
4675
4676	// Read the data back
4677 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4678 if err != nil {
4679		logError(testName, function, args, startTime, "", "GetObject failed", err)
4680 return
4681 }
4682
4683 // read directly
4684 buf1 := make([]byte, len(buf))
4685 buf2 := make([]byte, 512)
4686
4687 m, err := r.Read(buf1)
4688 if err != nil {
4689 if err != io.EOF {
4690 logError(testName, function, args, startTime, "", "Read failed", err)
4691 return
4692 }
4693 }
4694 if m != len(buf1) {
4695		logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf1))+", got "+strconv.Itoa(m), err)
4696 return
4697 }
4698 if !bytes.Equal(buf1, buf) {
4699		logError(testName, function, args, startTime, "", "Read data does not match the uploaded data", err)
4700 return
4701 }
4702
4703 st, err := r.Stat()
4704 if err != nil {
4705 logError(testName, function, args, startTime, "", "Stat failed", err)
4706 return
4707 }
4708
4709 if st.Size != int64(bufSize) {
4710		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+strconv.FormatInt(int64(bufSize), 10)+", got "+strconv.FormatInt(st.Size, 10), err)
4711 return
4712 }
4713
4714 m, err = r.ReadAt(buf2, 512)
4715 if err != nil {
4716 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4717 return
4718 }
4719 if m != len(buf2) {
4720		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+strconv.Itoa(len(buf2))+", got "+strconv.Itoa(m), err)
4721 return
4722 }
4723 if !bytes.Equal(buf2, buf[512:1024]) {
4724		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data", err)
4725 return
4726 }
4727
4728 successLogger(testName, function, args, startTime).Info()
4729}
4730
4731// Test Presigned Post Policy
4732func testPresignedPostPolicy() {
4733 // initialize logging params
4734 startTime := time.Now()
4735 testName := getFuncName()
4736 function := "PresignedPostPolicy(policy)"
4737 args := map[string]interface{}{
4738 "policy": "",
4739 }
4740
4741 // Seed random based on current time.
4742 rand.Seed(time.Now().Unix())
4743
4744 // Instantiate new minio client object
4745 c, err := minio.New(os.Getenv(serverEndpoint),
4746 &minio.Options{
4747 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4748 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4749 })
4750 if err != nil {
4751 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4752 return
4753 }
4754
4755 // Enable tracing, write to stderr.
4756 // c.TraceOn(os.Stderr)
4757
4758 // Set user agent.
4759 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4760
4761 // Generate a new random bucket name.
4762 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4763
4764 // Make a new bucket in 'us-east-1' (source bucket).
4765 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4766 if err != nil {
4767 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4768 return
4769 }
4770
4771 defer cleanupBucket(bucketName, c)
4772
4773 // Generate 33K of data.
4774 reader := getDataReader("datafile-33-kB")
4775 defer reader.Close()
4776
4777 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4778 // Azure requires the key to not start with a number
4779 metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
4780 metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4781
4782 buf, err := io.ReadAll(reader)
4783 if err != nil {
4784 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4785 return
4786 }
4787
4788 // Save the data
4789 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4790 if err != nil {
4791 logError(testName, function, args, startTime, "", "PutObject failed", err)
4792 return
4793 }
4794
4795 policy := minio.NewPostPolicy()
4796
4797 if err := policy.SetBucket(""); err == nil {
4798 logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
4799 return
4800 }
4801 if err := policy.SetKey(""); err == nil {
4802 logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
4803 return
4804 }
4805 if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
4806 logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
4807 return
4808 }
4809 if err := policy.SetContentType(""); err == nil {
4810 logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
4811 return
4812 }
4813 if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
4814 logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
4815 return
4816 }
4817 if err := policy.SetUserMetadata("", ""); err == nil {
4818 logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
4819 return
4820 }
4821
4822 policy.SetBucket(bucketName)
4823 policy.SetKey(objectName)
4824 policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
4825 policy.SetContentType("binary/octet-stream")
4826 policy.SetContentLengthRange(10, 1024*1024)
4827 policy.SetUserMetadata(metadataKey, metadataValue)
4828
4829 // Add CRC32C
4830 checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
4831 policy.SetChecksum(checksum)
4832
4833 args["policy"] = policy.String()
4834
4835 presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
4836 if err != nil {
4837 logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
4838 return
4839 }
4840
4841 var formBuf bytes.Buffer
4842 writer := multipart.NewWriter(&formBuf)
4843 for k, v := range formData {
4844 writer.WriteField(k, v)
4845 }
4846
4847 // Get a 33 KB file to upload and verify that the post policy is applied
4848 filePath := getMintDataDirFilePath("datafile-33-kB")
4849 if filePath == "" {
4850 // Make a temp file with 33 KB data.
4851 file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
4852 if err != nil {
4853 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
4854 return
4855 }
4856 if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
4857 logError(testName, function, args, startTime, "", "Copy failed", err)
4858 return
4859 }
4860 if err = file.Close(); err != nil {
4861 logError(testName, function, args, startTime, "", "File Close failed", err)
4862 return
4863 }
4864 filePath = file.Name()
4865 }
4866
4867 // add file to post request
4868 f, err := os.Open(filePath)
4869 if err != nil {
4870 logError(testName, function, args, startTime, "", "File open failed", err)
4871 return
4872 }
4873 defer f.Close()
4874 w, err := writer.CreateFormFile("file", filePath)
4875 if err != nil {
4876 logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
4877 return
4878 }
4879
4880 _, err = io.Copy(w, f)
4881 if err != nil {
4882 logError(testName, function, args, startTime, "", "Copy failed", err)
4883 return
4884 }
4885 writer.Close()
4886
4887 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
4888 if err != nil {
4889 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
4890 return
4891 }
4892
4893 httpClient := &http.Client{
4894 // Set a sensible timeout of 30s to wait for response
4895 // headers; the request is proactively canceled after
4896 // 30s with no response.
4897 Timeout: 30 * time.Second,
4898 Transport: transport,
4899 }
4900 args["url"] = presignedPostPolicyURL.String()
4901
4902 req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
4903 if err != nil {
4904 logError(testName, function, args, startTime, "", "Http request failed", err)
4905 return
4906 }
4907
4908 req.Header.Set("Content-Type", writer.FormDataContentType())
4909
4910 // make post request with correct form data
4911 res, err := httpClient.Do(req)
4912 if err != nil {
4913 logError(testName, function, args, startTime, "", "Http request failed", err)
4914 return
4915 }
4916 defer res.Body.Close()
4917 if res.StatusCode != http.StatusNoContent {
4918 logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
4919 return
4920 }
4921
4922 // expected path should be absolute path of the object
4923 var scheme string
4924 if mustParseBool(os.Getenv(enableHTTPS)) {
4925 scheme = "https://"
4926 } else {
4927 scheme = "http://"
4928 }
4929
4930 expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
4931 expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
4932
4933 if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
4934 // Test when not against AWS S3.
4935 if val, ok := res.Header["Location"]; ok {
4936 if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
4937 logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
4938 return
4939 }
4940 } else {
4941 logError(testName, function, args, startTime, "", "Location not found in header response", err)
4942 return
4943 }
4944 }
4945 want := checksum.Encoded()
4946 if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
4947 logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
4948 return
4949 }
4950
4951 successLogger(testName, function, args, startTime).Info()
4952}
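
// The test above exercises the full presigned POST flow, including the
// negative validation cases. As a condensed reference, the sketch below
// shows only the happy path: build a policy, presign it, and submit a
// multipart form. This is an illustrative sketch, not part of the test
// suite; examplePresignedPost and its parameters are hypothetical.
func examplePresignedPost(c *minio.Client, bucketName, objectName string, payload []byte) error {
	policy := minio.NewPostPolicy()
	// The Set* calls validate their inputs; errors are skipped here only
	// because the inputs are known-good literals.
	policy.SetBucket(bucketName)
	policy.SetKey(objectName)
	policy.SetExpires(time.Now().UTC().Add(24 * time.Hour))
	policy.SetContentLengthRange(1, 1024*1024)

	postURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
	if err != nil {
		return err
	}

	// Encode the signed form fields plus the file into a multipart body.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	for k, v := range formData {
		w.WriteField(k, v)
	}
	fw, err := w.CreateFormFile("file", objectName)
	if err != nil {
		return err
	}
	if _, err = fw.Write(payload); err != nil {
		return err
	}
	w.Close()

	req, err := http.NewRequest(http.MethodPost, postURL.String(), &body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// A successful POST upload returns 204 No Content.
	if res.StatusCode != http.StatusNoContent {
		return errors.New(res.Status)
	}
	return nil
}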
4953
4954// Tests copy object
4955func testCopyObject() {
4956 // initialize logging params
4957 startTime := time.Now()
4958 testName := getFuncName()
4959 function := "CopyObject(dst, src)"
4960 args := map[string]interface{}{}
4961
4962 // Seed random based on current time.
4963 rand.Seed(time.Now().Unix())
4964
4965 // Instantiate new minio client object
4966 c, err := minio.New(os.Getenv(serverEndpoint),
4967 &minio.Options{
4968 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4969 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4970 })
4971 if err != nil {
4972 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4973 return
4974 }
4975
4976 // Enable tracing, write to stderr.
4977 // c.TraceOn(os.Stderr)
4978
4979 // Set user agent.
4980 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4981
4982 // Generate a new random bucket name.
4983 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4984
4985 // Make a new bucket in 'us-east-1' (source bucket).
4986 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4987 if err != nil {
4988 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4989 return
4990 }
4991
4992 defer cleanupBucket(bucketName, c)
4993
4994 // Make a new bucket in 'us-east-1' (destination bucket).
4995 err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
4996 if err != nil {
4997 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4998 return
4999 }
5000 defer cleanupBucket(bucketName+"-copy", c)
5001
5002 // Generate 33K of data.
5003 bufSize := dataFileMap["datafile-33-kB"]
5004 reader := getDataReader("datafile-33-kB")
5005
5006 // Save the data
5007 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5008 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
5009 if err != nil {
5010 logError(testName, function, args, startTime, "", "PutObject failed", err)
5011 return
5012 }
5013
5014 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5015 if err != nil {
5016 logError(testName, function, args, startTime, "", "GetObject failed", err)
5017 return
5018 }
5019 // Check the various fields of source object against destination object.
5020 objInfo, err := r.Stat()
5021 if err != nil {
5022 logError(testName, function, args, startTime, "", "Stat failed", err)
5023 return
5024 }
5025
5026 // Copy Source
5027 src := minio.CopySrcOptions{
5028 Bucket: bucketName,
5029 Object: objectName,
5030 // Set copy conditions.
5031 MatchETag: objInfo.ETag,
5032 MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
5033 }
5034 args["src"] = src
5035
5036 dst := minio.CopyDestOptions{
5037 Bucket: bucketName + "-copy",
5038 Object: objectName + "-copy",
5039 }
5040
5041 // Perform the Copy
5042 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
5043 logError(testName, function, args, startTime, "", "CopyObject failed", err)
5044 return
5045 }
5046
5047 // Source object
5048 r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5049 if err != nil {
5050 logError(testName, function, args, startTime, "", "GetObject failed", err)
5051 return
5052 }
5053
5054 // Destination object
5055 readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
5056 if err != nil {
5057 logError(testName, function, args, startTime, "", "GetObject failed", err)
5058 return
5059 }
5060
5061 // Check the various fields of source object against destination object.
5062 objInfo, err = r.Stat()
5063 if err != nil {
5064 logError(testName, function, args, startTime, "", "Stat failed", err)
5065 return
5066 }
5067 objInfoCopy, err := readerCopy.Stat()
5068 if err != nil {
5069 logError(testName, function, args, startTime, "", "Stat failed", err)
5070 return
5071 }
5072 if objInfo.Size != objInfoCopy.Size {
5073 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err)
5074 return
5075 }
5076
5077 if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
5078 logError(testName, function, args, startTime, "", "data CRC check failed", err)
5079 return
5080 }
5081 if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
5082 logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
5083 return
5084 }
5085 // Close all the get readers before proceeding with CopyObject operations.
5086 r.Close()
5087 readerCopy.Close()
5088
5089 // CopyObject again but with wrong conditions
5090 src = minio.CopySrcOptions{
5091 Bucket: bucketName,
5092 Object: objectName,
5093 MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
5094 NoMatchETag: objInfo.ETag,
5095 }
5096
5097 // Perform the Copy which should fail
5098 _, err = c.CopyObject(context.Background(), dst, src)
5099 if err == nil {
5100 logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
5101 return
5102 }
5103
5104 src = minio.CopySrcOptions{
5105 Bucket: bucketName,
5106 Object: objectName,
5107 }
5108
5109 dst = minio.CopyDestOptions{
5110 Bucket: bucketName,
5111 Object: objectName,
5112 ReplaceMetadata: true,
5113 UserMetadata: map[string]string{
5114 "Copy": "should be same",
5115 },
5116 }
5117 args["dst"] = dst
5118 args["src"] = src
5119
5120 _, err = c.CopyObject(context.Background(), dst, src)
5121 if err != nil {
5122 logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
5123 return
5124 }
5125
5126 oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
5127 if err != nil {
5128 logError(testName, function, args, startTime, "", "StatObject failed", err)
5129 return
5130 }
5131
5132 stOpts := minio.StatObjectOptions{}
5133 stOpts.SetMatchETag(oi.ETag)
5134 objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts)
5135 if err != nil {
5136 logError(testName, function, args, startTime, "", "StatObject with matching ETag should not fail", err)
5137 return
5138 }
5139
5140 if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
5141 logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
5142 return
5143 }
5144
5145 successLogger(testName, function, args, startTime).Info()
5146}
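
// For reference, the core of a safe server-side copy can be reduced to the
// sketch below: Stat the source, then copy only while its ETag still
// matches, which guards against a concurrent overwrite between the Stat and
// the copy. Illustrative only; exampleConditionalCopy is hypothetical.
func exampleConditionalCopy(c *minio.Client, srcBucket, srcObject, dstBucket, dstObject string) error {
	stat, err := c.StatObject(context.Background(), srcBucket, srcObject, minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	src := minio.CopySrcOptions{
		Bucket:    srcBucket,
		Object:    srcObject,
		MatchETag: stat.ETag, // the copy fails if the source changed since Stat
	}
	dst := minio.CopyDestOptions{
		Bucket: dstBucket,
		Object: dstObject,
	}
	_, err = c.CopyObject(context.Background(), dst, src)
	return err
}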
5147
5148// Tests SSE-C get object ReaderSeeker interface methods.
5149func testSSECEncryptedGetObjectReadSeekFunctional() {
5150 // initialize logging params
5151 startTime := time.Now()
5152 testName := getFuncName()
5153 function := "GetObject(bucketName, objectName)"
5154 args := map[string]interface{}{}
5155
5156 // Seed random based on current time.
5157 rand.Seed(time.Now().Unix())
5158
5159 // Instantiate new minio client object.
5160 c, err := minio.New(os.Getenv(serverEndpoint),
5161 &minio.Options{
5162 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5163 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5164 })
5165 if err != nil {
5166 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5167 return
5168 }
5169
5170 // Enable tracing, write to stderr.
5171 // c.TraceOn(os.Stderr)
5172
5173 // Set user agent.
5174 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5175
5176 // Generate a new random bucket name.
5177 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5178 args["bucketName"] = bucketName
5179
5180 // Make a new bucket.
5181 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5182 if err != nil {
5183 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5184 return
5185 }
5186
5187 defer func() {
5188 // Delete all objects and buckets
5189 if err = cleanupBucket(bucketName, c); err != nil {
5190 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
5191 return
5192 }
5193 }()
5194
5195 // Generate 129MiB of data.
5196 bufSize := dataFileMap["datafile-129-MB"]
5197 reader := getDataReader("datafile-129-MB")
5198 defer reader.Close()
5199
5200 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5201 args["objectName"] = objectName
5202
5203 buf, err := io.ReadAll(reader)
5204 if err != nil {
5205 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5206 return
5207 }
5208
5209 // Save the data
5210 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5211 ContentType: "binary/octet-stream",
5212 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5213 })
5214 if err != nil {
5215 logError(testName, function, args, startTime, "", "PutObject failed", err)
5216 return
5217 }
5218
5219 // Read the data back
5220 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
5221 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5222 })
5223 if err != nil {
5224 logError(testName, function, args, startTime, "", "GetObject failed", err)
5225 return
5226 }
5227 defer r.Close()
5228
5229 st, err := r.Stat()
5230 if err != nil {
5231 logError(testName, function, args, startTime, "", "Stat object failed", err)
5232 return
5233 }
5234
5235 if st.Size != int64(bufSize) {
5236 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
5237 return
5238 }
5239
5240 // The following function compares the data read from the reader after a
5241 // seek against the corresponding slice of the original buffer.
5242 cmpData := func(r io.Reader, start, end int) {
5243 if end-start == 0 {
5244 return
5245 }
5246 buffer := bytes.NewBuffer([]byte{})
5247 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
5248 if err != io.EOF {
5249 logError(testName, function, args, startTime, "", "CopyN failed", err)
5250 return
5251 }
5252 }
5253 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
5254 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
5255 return
5256 }
5257 }
5258
5259 testCases := []struct {
5260 offset int64
5261 whence int
5262 pos int64
5263 err error
5264 shouldCmp bool
5265 start int
5266 end int
5267 }{
5268 // Start from offset 0, fetch data and compare
5269 {0, 0, 0, nil, true, 0, 0},
5270 // Start from offset 2048, fetch data and compare
5271 {2048, 0, 2048, nil, true, 2048, bufSize},
5272 // Start from offset larger than possible
5273 {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
5274 // Move to offset 0 without comparing
5275 {0, 0, 0, nil, false, 0, 0},
5276 // Move one step forward and compare
5277 {1, 1, 1, nil, true, 1, bufSize},
5278 // Move larger than possible
5279 {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
5280 // Provide negative offset with CUR_SEEK
5281 {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
5282 // Test with whence SEEK_END and with positive offset
5283 {1024, 2, 0, io.EOF, false, 0, 0},
5284 // Test with whence SEEK_END and with negative offset
5285 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
5286 // Test with whence SEEK_END and with large negative offset
5287 {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
5288 // Test with invalid whence
5289 {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
5290 }
5291
5292 for i, testCase := range testCases {
5293 // Perform seek operation
5294 n, err := r.Seek(testCase.offset, testCase.whence)
5295 if err != nil && testCase.err == nil {
5296 // We expected success.
5297 logError(testName, function, args, startTime, "",
5298 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5299 return
5300 }
5301 if err == nil && testCase.err != nil {
5302 // We expected failure, but got success.
5303 logError(testName, function, args, startTime, "",
5304 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5305 return
5306 }
5307 if err != nil && testCase.err != nil {
5308 if err.Error() != testCase.err.Error() {
5309 // We expect a specific error
5310 logError(testName, function, args, startTime, "",
5311 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5312 return
5313 }
5314 }
5315 // Check the returned seek pos
5316 if n != testCase.pos {
5317 logError(testName, function, args, startTime, "",
5318 fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
5319 return
5320 }
5321 // Compare only if shouldCmp is activated
5322 if testCase.shouldCmp {
5323 cmpData(r, testCase.start, testCase.end)
5324 }
5325 }
5326
5327 successLogger(testName, function, args, startTime).Info()
5328}
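
// The SSE-C tests above derive key material with encrypt.DefaultPBKDF from a
// password and a salt. When a raw 256-bit key is already available, the
// equivalent option can be built with encrypt.NewSSEC, as sketched below.
// Illustrative only; how the key is obtained and stored is outside the scope
// of these tests.
func exampleSSECFromRawKey(key [32]byte) (encrypt.ServerSideEncryption, error) {
	// NewSSEC expects exactly 32 bytes of key material; the returned option
	// is passed via PutObjectOptions/GetObjectOptions.ServerSideEncryption.
	return encrypt.NewSSEC(key[:])
}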
5329
5330// Tests SSE-S3 get object ReaderSeeker interface methods.
5331func testSSES3EncryptedGetObjectReadSeekFunctional() {
5332 // initialize logging params
5333 startTime := time.Now()
5334 testName := getFuncName()
5335 function := "GetObject(bucketName, objectName)"
5336 args := map[string]interface{}{}
5337
5338 // Seed random based on current time.
5339 rand.Seed(time.Now().Unix())
5340
5341 // Instantiate new minio client object.
5342 c, err := minio.New(os.Getenv(serverEndpoint),
5343 &minio.Options{
5344 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5345 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5346 })
5347 if err != nil {
5348 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5349 return
5350 }
5351
5352 // Enable tracing, write to stderr.
5353 // c.TraceOn(os.Stderr)
5354
5355 // Set user agent.
5356 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5357
5358 // Generate a new random bucket name.
5359 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5360 args["bucketName"] = bucketName
5361
5362 // Make a new bucket.
5363 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5364 if err != nil {
5365 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5366 return
5367 }
5368
5369 defer func() {
5370 // Delete all objects and buckets
5371 if err = cleanupBucket(bucketName, c); err != nil {
5372 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
5373 return
5374 }
5375 }()
5376
5377 // Generate 129MiB of data.
5378 bufSize := dataFileMap["datafile-129-MB"]
5379 reader := getDataReader("datafile-129-MB")
5380 defer reader.Close()
5381
5382 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5383 args["objectName"] = objectName
5384
5385 buf, err := io.ReadAll(reader)
5386 if err != nil {
5387 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5388 return
5389 }
5390
5391 // Save the data
5392 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5393 ContentType: "binary/octet-stream",
5394 ServerSideEncryption: encrypt.NewSSE(),
5395 })
5396 if err != nil {
5397 logError(testName, function, args, startTime, "", "PutObject failed", err)
5398 return
5399 }
5400
5401 // Read the data back
5402 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5403 if err != nil {
5404 logError(testName, function, args, startTime, "", "GetObject failed", err)
5405 return
5406 }
5407 defer r.Close()
5408
5409 st, err := r.Stat()
5410 if err != nil {
5411 logError(testName, function, args, startTime, "", "Stat object failed", err)
5412 return
5413 }
5414
5415 if st.Size != int64(bufSize) {
5416 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
5417 return
5418 }
5419
5420 // The following function compares the data read from the reader after a
5421 // seek against the corresponding slice of the original buffer.
5422 cmpData := func(r io.Reader, start, end int) {
5423 if end-start == 0 {
5424 return
5425 }
5426 buffer := bytes.NewBuffer([]byte{})
5427 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
5428 if err != io.EOF {
5429 logError(testName, function, args, startTime, "", "CopyN failed", err)
5430 return
5431 }
5432 }
5433 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
5434 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
5435 return
5436 }
5437 }
5438
5439 testCases := []struct {
5440 offset int64
5441 whence int
5442 pos int64
5443 err error
5444 shouldCmp bool
5445 start int
5446 end int
5447 }{
5448 // Start from offset 0, fetch data and compare
5449 {0, 0, 0, nil, true, 0, 0},
5450 // Start from offset 2048, fetch data and compare
5451 {2048, 0, 2048, nil, true, 2048, bufSize},
5452 // Start from offset larger than possible
5453 {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
5454 // Move to offset 0 without comparing
5455 {0, 0, 0, nil, false, 0, 0},
5456 // Move one step forward and compare
5457 {1, 1, 1, nil, true, 1, bufSize},
5458 // Move larger than possible
5459 {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
5460 // Provide negative offset with CUR_SEEK
5461 {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
5462 // Test with whence SEEK_END and with positive offset
5463 {1024, 2, 0, io.EOF, false, 0, 0},
5464 // Test with whence SEEK_END and with negative offset
5465 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
5466 // Test with whence SEEK_END and with large negative offset
5467 {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
5468 // Test with invalid whence
5469 {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
5470 }
5471
5472 for i, testCase := range testCases {
5473 // Perform seek operation
5474 n, err := r.Seek(testCase.offset, testCase.whence)
5475 if err != nil && testCase.err == nil {
5476 // We expected success.
5477 logError(testName, function, args, startTime, "",
5478 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5479 return
5480 }
5481 if err == nil && testCase.err != nil {
5482 // We expected failure, but got success.
5483 logError(testName, function, args, startTime, "",
5484 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5485 return
5486 }
5487 if err != nil && testCase.err != nil {
5488 if err.Error() != testCase.err.Error() {
5489 // We expect a specific error
5490 logError(testName, function, args, startTime, "",
5491 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5492 return
5493 }
5494 }
5495 // Check the returned seek pos
5496 if n != testCase.pos {
5497 logError(testName, function, args, startTime, "",
5498 fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
5499 return
5500 }
5501 // Compare only if shouldCmp is activated
5502 if testCase.shouldCmp {
5503 cmpData(r, testCase.start, testCase.end)
5504 }
5505 }
5506
5507 successLogger(testName, function, args, startTime).Info()
5508}
5509
5510// Tests SSE-C get object ReaderAt interface methods.
5511func testSSECEncryptedGetObjectReadAtFunctional() {
5512 // initialize logging params
5513 startTime := time.Now()
5514 testName := getFuncName()
5515 function := "GetObject(bucketName, objectName)"
5516 args := map[string]interface{}{}
5517
5518 // Seed random based on current time.
5519 rand.Seed(time.Now().Unix())
5520
5521 // Instantiate new minio client object.
5522 c, err := minio.New(os.Getenv(serverEndpoint),
5523 &minio.Options{
5524 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5525 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5526 })
5527 if err != nil {
5528 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5529 return
5530 }
5531
5532 // Enable tracing, write to stderr.
5533 // c.TraceOn(os.Stderr)
5534
5535 // Set user agent.
5536 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5537
5538 // Generate a new random bucket name.
5539 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5540 args["bucketName"] = bucketName
5541
5542 // Make a new bucket.
5543 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5544 if err != nil {
5545 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5546 return
5547 }
5548
5549 defer cleanupBucket(bucketName, c)
5550
5551 // Generate 129MiB of data.
5552 bufSize := dataFileMap["datafile-129-MB"]
5553 reader := getDataReader("datafile-129-MB")
5554 defer reader.Close()
5555
5556 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5557 args["objectName"] = objectName
5558
5559 buf, err := io.ReadAll(reader)
5560 if err != nil {
5561 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5562 return
5563 }
5564
5565 // Save the data
5566 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5567 ContentType: "binary/octet-stream",
5568 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5569 })
5570 if err != nil {
5571 logError(testName, function, args, startTime, "", "PutObject failed", err)
5572 return
5573 }
5574
5575 // read the data back
5576 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
5577 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5578 })
5579 if err != nil {
5580 logError(testName, function, args, startTime, "", "GetObject failed", err)
5581 return
5582 }
5583 defer r.Close()
5584
5585 offset := int64(2048)
5586
5587 // read directly
5588 buf1 := make([]byte, 512)
5589 buf2 := make([]byte, 512)
5590 buf3 := make([]byte, 512)
5591 buf4 := make([]byte, 512)
5592
5593 // Test readAt before stat is called such that objectInfo doesn't change.
5594 m, err := r.ReadAt(buf1, offset)
5595 if err != nil {
5596 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5597 return
5598 }
5599 if m != len(buf1) {
5600 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err)
5601 return
5602 }
5603 if !bytes.Equal(buf1, buf[offset:offset+512]) {
5604 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5605 return
5606 }
5607 offset += 512
5608
5609 st, err := r.Stat()
5610 if err != nil {
5611 logError(testName, function, args, startTime, "", "Stat failed", err)
5612 return
5613 }
5614
5615 if st.Size != int64(bufSize) {
5616 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
5617 return
5618 }
5619
5620 m, err = r.ReadAt(buf2, offset)
5621 if err != nil {
5622 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5623 return
5624 }
5625 if m != len(buf2) {
5626 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err)
5627 return
5628 }
5629 if !bytes.Equal(buf2, buf[offset:offset+512]) {
5630 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5631 return
5632 }
5633 offset += 512
5634 m, err = r.ReadAt(buf3, offset)
5635 if err != nil {
5636 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5637 return
5638 }
5639 if m != len(buf3) {
5640 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err)
5641 return
5642 }
5643 if !bytes.Equal(buf3, buf[offset:offset+512]) {
5644 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5645 return
5646 }
5647 offset += 512
5648 m, err = r.ReadAt(buf4, offset)
5649 if err != nil {
5650 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5651 return
5652 }
5653 if m != len(buf4) {
5654 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err)
5655 return
5656 }
5657 if !bytes.Equal(buf4, buf[offset:offset+512]) {
5658 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5659 return
5660 }
5661
5662 buf5 := make([]byte, len(buf))
5663 // Read the whole object.
5664 m, err = r.ReadAt(buf5, 0)
5665 if err != nil {
5666 if err != io.EOF {
5667 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5668 return
5669 }
5670 }
5671 if m != len(buf5) {
5672 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err)
5673 return
5674 }
5675 if !bytes.Equal(buf, buf5) {
5676 logError(testName, function, args, startTime, "", "Data read via GetObject does not match what was uploaded", err)
5677 return
5678 }
5679
5680 buf6 := make([]byte, len(buf)+1)
5681 // Read the whole object and beyond.
5682 _, err = r.ReadAt(buf6, 0)
5683 if err != nil {
5684 if err != io.EOF {
5685 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5686 return
5687 }
5688 }
5689
5690 successLogger(testName, function, args, startTime).Info()
5691}
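
// Since the object handle returned by GetObject implements io.ReaderAt, a
// fixed window of the object can also be consumed through the standard
// library via io.NewSectionReader, avoiding the manual offset bookkeeping
// used in the test above. A minimal sketch; exampleReadSection is
// hypothetical.
func exampleReadSection(r *minio.Object, offset, length int64) ([]byte, error) {
	// NewSectionReader bounds all reads to [offset, offset+length).
	section := io.NewSectionReader(r, offset, length)
	return io.ReadAll(section)
}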
5692
5693// Tests SSE-S3 get object ReaderAt interface methods.
5694func testSSES3EncryptedGetObjectReadAtFunctional() {
5695 // initialize logging params
5696 startTime := time.Now()
5697 testName := getFuncName()
5698 function := "GetObject(bucketName, objectName)"
5699 args := map[string]interface{}{}
5700
5701 // Seed random based on current time.
5702 rand.Seed(time.Now().Unix())
5703
5704 // Instantiate new minio client object.
5705 c, err := minio.New(os.Getenv(serverEndpoint),
5706 &minio.Options{
5707 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5708 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5709 })
5710 if err != nil {
5711 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5712 return
5713 }
5714
5715 // Enable tracing, write to stderr.
5716 // c.TraceOn(os.Stderr)
5717
5718 // Set user agent.
5719 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5720
5721 // Generate a new random bucket name.
5722 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5723 args["bucketName"] = bucketName
5724
5725 // Make a new bucket.
5726 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5727 if err != nil {
5728 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5729 return
5730 }
5731
5732 defer cleanupBucket(bucketName, c)
5733
5734 // Generate 129MiB of data.
5735 bufSize := dataFileMap["datafile-129-MB"]
5736 reader := getDataReader("datafile-129-MB")
5737 defer reader.Close()
5738
5739 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5740 args["objectName"] = objectName
5741
5742 buf, err := io.ReadAll(reader)
5743 if err != nil {
5744 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5745 return
5746 }
5747
5748 // Save the data
5749 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5750 ContentType: "binary/octet-stream",
5751 ServerSideEncryption: encrypt.NewSSE(),
5752 })
5753 if err != nil {
5754 logError(testName, function, args, startTime, "", "PutObject failed", err)
5755 return
5756 }
5757
5758 // read the data back
5759 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5760 if err != nil {
5761 logError(testName, function, args, startTime, "", "GetObject failed", err)
5762 return
5763 }
5764 defer r.Close()
5765
5766 offset := int64(2048)
5767
5768 // read directly
5769 buf1 := make([]byte, 512)
5770 buf2 := make([]byte, 512)
5771 buf3 := make([]byte, 512)
5772 buf4 := make([]byte, 512)
5773
5774 // Test readAt before stat is called such that objectInfo doesn't change.
5775 m, err := r.ReadAt(buf1, offset)
5776 if err != nil {
5777 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5778 return
5779 }
5780 if m != len(buf1) {
5781 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err)
5782 return
5783 }
5784 if !bytes.Equal(buf1, buf[offset:offset+512]) {
5785 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5786 return
5787 }
5788 offset += 512
5789
5790 st, err := r.Stat()
5791 if err != nil {
5792 logError(testName, function, args, startTime, "", "Stat failed", err)
5793 return
5794 }
5795
5796 if st.Size != int64(bufSize) {
5797 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
5798 return
5799 }
5800
5801 m, err = r.ReadAt(buf2, offset)
5802 if err != nil {
5803 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5804 return
5805 }
5806 if m != len(buf2) {
5807 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err)
5808 return
5809 }
5810 if !bytes.Equal(buf2, buf[offset:offset+512]) {
5811 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5812 return
5813 }
5814 offset += 512
5815 m, err = r.ReadAt(buf3, offset)
5816 if err != nil {
5817 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5818 return
5819 }
5820 if m != len(buf3) {
5821 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err)
5822 return
5823 }
5824 if !bytes.Equal(buf3, buf[offset:offset+512]) {
5825 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5826 return
5827 }
5828 offset += 512
5829 m, err = r.ReadAt(buf4, offset)
5830 if err != nil {
5831 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5832 return
5833 }
5834 if m != len(buf4) {
5835 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err)
5836 return
5837 }
5838 if !bytes.Equal(buf4, buf[offset:offset+512]) {
5839 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
5840 return
5841 }
5842
5843 buf5 := make([]byte, len(buf))
5844 // Read the whole object.
5845 m, err = r.ReadAt(buf5, 0)
5846 if err != nil {
5847 if err != io.EOF {
5848 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5849 return
5850 }
5851 }
5852 if m != len(buf5) {
5853 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err)
5854 return
5855 }
5856 if !bytes.Equal(buf, buf5) {
5857 logError(testName, function, args, startTime, "", "Data read via GetObject does not match what was uploaded", err)
5858 return
5859 }
5860
5861 buf6 := make([]byte, len(buf)+1)
5862 // Read the whole object and beyond.
5863 _, err = r.ReadAt(buf6, 0)
5864 if err != nil {
5865 if err != io.EOF {
5866 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5867 return
5868 }
5869 }
5870
5871 successLogger(testName, function, args, startTime).Info()
5872}
5873
5874// testSSECEncryptionPutGet tests encryption with customer provided encryption keys
5875func testSSECEncryptionPutGet() {
5876 // initialize logging params
5877 startTime := time.Now()
5878 testName := getFuncName()
5879 function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
5880 args := map[string]interface{}{
5881 "bucketName": "",
5882 "objectName": "",
5883 "sse": "",
5884 }
5885 // Seed random based on current time.
5886 rand.Seed(time.Now().Unix())
5887
5888 // Instantiate new minio client object
5889 c, err := minio.New(os.Getenv(serverEndpoint),
5890 &minio.Options{
5891 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5892 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5893 })
5894 if err != nil {
5895 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5896 return
5897 }
5898
5899 // Enable tracing, write to stderr.
5900 // c.TraceOn(os.Stderr)
5901
5902 // Set user agent.
5903 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5904
5905 // Generate a new random bucket name.
5906 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5907 args["bucketName"] = bucketName
5908
5909 // Make a new bucket.
5910 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5911 if err != nil {
5912 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5913 return
5914 }
5915
5916 defer cleanupBucket(bucketName, c)
5917
5918 testCases := []struct {
5919 buf []byte
5920 }{
5921 {buf: bytes.Repeat([]byte("F"), 1)},
5922 {buf: bytes.Repeat([]byte("F"), 15)},
5923 {buf: bytes.Repeat([]byte("F"), 16)},
5924 {buf: bytes.Repeat([]byte("F"), 17)},
5925 {buf: bytes.Repeat([]byte("F"), 31)},
5926 {buf: bytes.Repeat([]byte("F"), 32)},
5927 {buf: bytes.Repeat([]byte("F"), 33)},
5928 {buf: bytes.Repeat([]byte("F"), 1024)},
5929 {buf: bytes.Repeat([]byte("F"), 1024*2)},
5930 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
5931 }
5932
5933 const password = "correct horse battery staple" // https://xkcd.com/936/
5934
5935 for i, testCase := range testCases {
5936 // Generate a random object name
5937 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5938 args["objectName"] = objectName
5939
5940 // Secured object
5941 sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
5942 args["sse"] = sse
5943
5944 // Put encrypted data
5945 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
5946 if err != nil {
5947 logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
5948 return
5949 }
5950
5951 // Read the data back
5952 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
5953 if err != nil {
5954 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
5955 return
5956 }
5957 defer r.Close()
5958
5959 // Compare the sent object with the received one
5960 recvBuffer := bytes.NewBuffer([]byte{})
5961 if _, err = io.Copy(recvBuffer, r); err != nil {
5962 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
5963 return
5964 }
5965 if recvBuffer.Len() != len(testCase.buf) {
5966 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
5967 return
5968 }
5969 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
5970 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err)
5971 return
5972 }
5973
5974 successLogger(testName, function, args, startTime).Info()
5975
5976 }
5977
5978 successLogger(testName, function, args, startTime).Info()
5979}
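
// Note that the loop above defers r.Close() inside the loop body, so every
// reader stays open until the whole test function returns. That is harmless
// for a short test, but when iterating over many objects it is safer to
// close per iteration, e.g. by pushing the work into a helper as sketched
// below (illustrative only; exampleReadObjectOnce is hypothetical).
func exampleReadObjectOnce(c *minio.Client, bucketName, objectName string, sse encrypt.ServerSideEncryption) ([]byte, error) {
	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return nil, err
	}
	// The deferred Close runs when this helper returns, i.e. once per call,
	// instead of accumulating across loop iterations.
	defer r.Close()
	return io.ReadAll(r)
}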
5980
5981// TestEncryptionFPut tests encryption with customer specified encryption keys
5982func testSSECEncryptionFPut() {
5983 // initialize logging params
5984 startTime := time.Now()
5985 testName := getFuncName()
5986 function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
5987 args := map[string]interface{}{
5988 "bucketName": "",
5989 "objectName": "",
5990 "filePath": "",
5991 "contentType": "",
5992 "sse": "",
5993 }
5994 // Seed random based on current time.
5995 rand.Seed(time.Now().Unix())
5996
5997 // Instantiate new minio client object
5998 c, err := minio.New(os.Getenv(serverEndpoint),
5999 &minio.Options{
6000 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6001 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6002 })
6003 if err != nil {
6004 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6005 return
6006 }
6007
6008 // Enable tracing, write to stderr.
6009 // c.TraceOn(os.Stderr)
6010
6011 // Set user agent.
6012 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6013
6014 // Generate a new random bucket name.
6015 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6016 args["bucketName"] = bucketName
6017
6018 // Make a new bucket.
6019 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6020 if err != nil {
6021 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6022 return
6023 }
6024
6025 defer cleanupBucket(bucketName, c)
6026
6027 // Object custom metadata
6028 customContentType := "custom/contenttype"
6029 args["metadata"] = customContentType
6030
6031 testCases := []struct {
6032 buf []byte
6033 }{
6034 {buf: bytes.Repeat([]byte("F"), 0)},
6035 {buf: bytes.Repeat([]byte("F"), 1)},
6036 {buf: bytes.Repeat([]byte("F"), 15)},
6037 {buf: bytes.Repeat([]byte("F"), 16)},
6038 {buf: bytes.Repeat([]byte("F"), 17)},
6039 {buf: bytes.Repeat([]byte("F"), 31)},
6040 {buf: bytes.Repeat([]byte("F"), 32)},
6041 {buf: bytes.Repeat([]byte("F"), 33)},
6042 {buf: bytes.Repeat([]byte("F"), 1024)},
6043 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6044 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6045 }
6046
6047 const password = "correct horse battery staple" // https://xkcd.com/936/
6048 for i, testCase := range testCases {
6049 // Generate a random object name
6050 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6051 args["objectName"] = objectName
6052
6053 // Secured object
6054 sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
6055 args["sse"] = sse
6056
6057 // Generate a random file name.
6058 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6059 file, err := os.Create(fileName)
6060 if err != nil {
6061 logError(testName, function, args, startTime, "", "file create failed", err)
6062 return
6063 }
6064 _, err = file.Write(testCase.buf)
6065 if err != nil {
6066 logError(testName, function, args, startTime, "", "file write failed", err)
6067 return
6068 }
6069 file.Close()
6070 // Put encrypted data
6071 if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
6072 logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
6073 return
6074 }
6075
6076 // Read the data back
6077 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
6078 if err != nil {
6079 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6080 return
6081 }
6082 defer r.Close()
6083
6084 // Compare the sent object with the received one
6085 recvBuffer := bytes.NewBuffer([]byte{})
6086 if _, err = io.Copy(recvBuffer, r); err != nil {
6087 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
6088 return
6089 }
6090 if recvBuffer.Len() != len(testCase.buf) {
6091 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
6092 return
6093 }
6094 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6095 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err)
6096 return
6097 }
6098
6099 os.Remove(fileName)
6100 }
6101
6102 successLogger(testName, function, args, startTime).Info()
6103}
6104
6105// testSSES3EncryptionPutGet tests SSE-S3 encryption
6106func testSSES3EncryptionPutGet() {
6107 // initialize logging params
6108 startTime := time.Now()
6109 testName := getFuncName()
6110 function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
6111 args := map[string]interface{}{
6112 "bucketName": "",
6113 "objectName": "",
6114 "sse": "",
6115 }
6116 // Seed random based on current time.
6117 rand.Seed(time.Now().Unix())
6118
6119 // Instantiate new minio client object
6120 c, err := minio.New(os.Getenv(serverEndpoint),
6121 &minio.Options{
6122 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6123 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6124 })
6125 if err != nil {
6126 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6127 return
6128 }
6129
6130 // Enable tracing, write to stderr.
6131 // c.TraceOn(os.Stderr)
6132
6133 // Set user agent.
6134 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6135
6136 // Generate a new random bucket name.
6137 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6138 args["bucketName"] = bucketName
6139
6140 // Make a new bucket.
6141 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6142 if err != nil {
6143 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6144 return
6145 }
6146
6147 defer cleanupBucket(bucketName, c)
6148
6149 testCases := []struct {
6150 buf []byte
6151 }{
6152 {buf: bytes.Repeat([]byte("F"), 1)},
6153 {buf: bytes.Repeat([]byte("F"), 15)},
6154 {buf: bytes.Repeat([]byte("F"), 16)},
6155 {buf: bytes.Repeat([]byte("F"), 17)},
6156 {buf: bytes.Repeat([]byte("F"), 31)},
6157 {buf: bytes.Repeat([]byte("F"), 32)},
6158 {buf: bytes.Repeat([]byte("F"), 33)},
6159 {buf: bytes.Repeat([]byte("F"), 1024)},
6160 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6161 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6162 }
6163
6164 for i, testCase := range testCases {
6165 // Generate a random object name
6166 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6167 args["objectName"] = objectName
6168
6169 // Secured object
6170 sse := encrypt.NewSSE()
6171 args["sse"] = sse
6172
6173 // Put encrypted data
6174 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
6175 if err != nil {
6176 logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
6177 return
6178 }
6179
6180 // Read the data back without any encryption headers
6181 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6182 if err != nil {
6183 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6184 return
6185 }
6186 defer r.Close()
6187
6188 // Compare the sent object with the received one
6189 recvBuffer := bytes.NewBuffer([]byte{})
6190 if _, err = io.Copy(recvBuffer, r); err != nil {
6191 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
6192 return
6193 }
6194 if recvBuffer.Len() != len(testCase.buf) {
6195 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
6196 return
6197 }
6198 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6199 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err)
6200 return
6201 }
6202
6203 successLogger(testName, function, args, startTime).Info()
6204
6205 }
6206
6207 successLogger(testName, function, args, startTime).Info()
6208}
6209
6210// TestSSES3EncryptionFPut tests server side encryption
6211func testSSES3EncryptionFPut() {
6212 // initialize logging params
6213 startTime := time.Now()
6214 testName := getFuncName()
6215 function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
6216 args := map[string]interface{}{
6217 "bucketName": "",
6218 "objectName": "",
6219 "filePath": "",
6220 "contentType": "",
6221 "sse": "",
6222 }
6223 // Seed random based on current time.
6224 rand.Seed(time.Now().Unix())
6225
6226 // Instantiate new minio client object
6227 c, err := minio.New(os.Getenv(serverEndpoint),
6228 &minio.Options{
6229 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6230 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6231 })
6232 if err != nil {
6233 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6234 return
6235 }
6236
6237 // Enable tracing, write to stderr.
6238 // c.TraceOn(os.Stderr)
6239
6240 // Set user agent.
6241 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6242
6243 // Generate a new random bucket name.
6244 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6245 args["bucketName"] = bucketName
6246
6247 // Make a new bucket.
6248 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6249 if err != nil {
6250 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6251 return
6252 }
6253
6254 defer cleanupBucket(bucketName, c)
6255
6256 // Object custom metadata
6257 customContentType := "custom/contenttype"
6258 args["metadata"] = customContentType
6259
6260 testCases := []struct {
6261 buf []byte
6262 }{
6263 {buf: bytes.Repeat([]byte("F"), 0)},
6264 {buf: bytes.Repeat([]byte("F"), 1)},
6265 {buf: bytes.Repeat([]byte("F"), 15)},
6266 {buf: bytes.Repeat([]byte("F"), 16)},
6267 {buf: bytes.Repeat([]byte("F"), 17)},
6268 {buf: bytes.Repeat([]byte("F"), 31)},
6269 {buf: bytes.Repeat([]byte("F"), 32)},
6270 {buf: bytes.Repeat([]byte("F"), 33)},
6271 {buf: bytes.Repeat([]byte("F"), 1024)},
6272 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6273 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6274 }
6275
6276 for i, testCase := range testCases {
6277 // Generate a random object name
6278 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6279 args["objectName"] = objectName
6280
6281 // Secured object
6282 sse := encrypt.NewSSE()
6283 args["sse"] = sse
6284
6285 // Generate a random file name.
6286 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6287 file, err := os.Create(fileName)
6288 if err != nil {
6289 logError(testName, function, args, startTime, "", "file create failed", err)
6290 return
6291 }
6292 _, err = file.Write(testCase.buf)
6293 if err != nil {
6294 logError(testName, function, args, startTime, "", "file write failed", err)
6295 return
6296 }
6297 file.Close()
6298 // Put encrypted data
6299 if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
6300 logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
6301 return
6302 }
6303
6304 // Read the data back
6305 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6306 if err != nil {
6307 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6308 return
6309 }
6310 defer r.Close()
6311
6312 // Compare the sent object with the received one
6313 recvBuffer := bytes.NewBuffer([]byte{})
6314 if _, err = io.Copy(recvBuffer, r); err != nil {
6315 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
6316 return
6317 }
6318 if recvBuffer.Len() != len(testCase.buf) {
6319 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
6320 return
6321 }
6322 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6323 logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err)
6324 return
6325 }
6326
6327 os.Remove(fileName)
6328 }
6329
6330 successLogger(testName, function, args, startTime).Info()
6331}
6332
6333func testBucketNotification() {
6334 // initialize logging params
6335 startTime := time.Now()
6336 testName := getFuncName()
6337 function := "SetBucketNotification(bucketName)"
6338 args := map[string]interface{}{
6339 "bucketName": "",
6340 }
6341
6342 if os.Getenv("NOTIFY_BUCKET") == "" ||
6343 os.Getenv("NOTIFY_SERVICE") == "" ||
6344 os.Getenv("NOTIFY_REGION") == "" ||
6345 os.Getenv("NOTIFY_ACCOUNTID") == "" ||
6346 os.Getenv("NOTIFY_RESOURCE") == "" {
6347 ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
6348 return
6349 }
6350
6351 // Seed random based on current time.
6352 rand.Seed(time.Now().Unix())
6353
6354 c, err := minio.New(os.Getenv(serverEndpoint),
6355 &minio.Options{
6356 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6357 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6358 })
6359 if err != nil {
6360 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6361 return
6362 }
6363
6364 // Enable to debug
6365 // c.TraceOn(os.Stderr)
6366
6367 // Set user agent.
6368 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6369
6370 bucketName := os.Getenv("NOTIFY_BUCKET")
6371 args["bucketName"] = bucketName
6372
6373 topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
6374 queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
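	// The dummy queue ARN never reaches the server: the queue config is
	// added and removed again below purely to exercise the client-side
	// Configuration helpers.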
6375
6376 topicConfig := notification.NewConfig(topicArn)
6377 topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
6378 topicConfig.AddFilterSuffix("jpg")
6379
6380 queueConfig := notification.NewConfig(queueArn)
6381 queueConfig.AddEvents(notification.ObjectCreatedAll)
6382 queueConfig.AddFilterPrefix("photos/")
6383
6384 config := notification.Configuration{}
6385 config.AddTopic(topicConfig)
6386
6387 // Add the same topicConfig again, should have no effect
6388 // because it is duplicated
6389 config.AddTopic(topicConfig)
6390 if len(config.TopicConfigs) != 1 {
6391 logError(testName, function, args, startTime, "", "Duplicate entry added", err)
6392 return
6393 }
6394
6395 // Add and remove a queue config
6396 config.AddQueue(queueConfig)
6397 config.RemoveQueueByArn(queueArn)
6398
6399 err = c.SetBucketNotification(context.Background(), bucketName, config)
6400 if err != nil {
6401 logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
6402 return
6403 }
6404
6405 config, err = c.GetBucketNotification(context.Background(), bucketName)
6406 if err != nil {
6407 logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
6408 return
6409 }
6410
6411 if len(config.TopicConfigs) != 1 {
6412 logError(testName, function, args, startTime, "", "Topic config is empty", err)
6413 return
6414 }
6415
6416 if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
6417 logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
6418 return
6419 }
6420
6421 err = c.RemoveAllBucketNotification(context.Background(), bucketName)
6422 if err != nil {
6423 logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
6424 return
6425 }
6426
6427 // Delete all objects and buckets
6428 if err = cleanupBucket(bucketName, c); err != nil {
6429 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
6430 return
6431 }
6432
6433 successLogger(testName, function, args, startTime).Info()
6434}
6435
6436 // Tests a comprehensive list of all methods.
6437func testFunctional() {
6438 // initialize logging params
6439 startTime := time.Now()
6440 testName := getFuncName()
6441 function := "testFunctional()"
6442 functionAll := ""
6443 args := map[string]interface{}{}
6444
6445 // Seed random based on current time.
6446 rand.Seed(time.Now().Unix())
6447
6448 c, err := minio.New(os.Getenv(serverEndpoint),
6449 &minio.Options{
6450 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6451 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6452 })
6453 if err != nil {
6454 logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err)
6455 return
6456 }
6457
6458 // Enable to debug
6459 // c.TraceOn(os.Stderr)
6460
6461 // Set user agent.
6462 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6463
6464 // Generate a new random bucket name.
6465 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6466
6467 // Make a new bucket.
6468 function = "MakeBucket(bucketName, region)"
6469 functionAll = "MakeBucket(bucketName, region)"
6470 args["bucketName"] = bucketName
6471 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6472
6473 defer cleanupBucket(bucketName, c)
6474 if err != nil {
6475 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6476 return
6477 }
6478
6479 // Generate a random file name.
6480 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6481 file, err := os.Create(fileName)
6482 if err != nil {
6483 logError(testName, function, args, startTime, "", "File creation failed", err)
6484 return
6485 }
6486 for i := 0; i < 3; i++ {
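		// Each iteration appends up to 512 KiB of zero bytes (make()
		// returns a zeroed slice), so the file ends up with a random size.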
6487 buf := make([]byte, rand.Intn(1<<19))
6488 _, err = file.Write(buf)
6489 if err != nil {
6490 logError(testName, function, args, startTime, "", "File write failed", err)
6491 return
6492 }
6493 }
6494 file.Close()
6495
6496 	// Verify the bucket exists and that you have access.
6497 var exists bool
6498 function = "BucketExists(bucketName)"
6499 functionAll += ", " + function
6500 args = map[string]interface{}{
6501 "bucketName": bucketName,
6502 }
6503 exists, err = c.BucketExists(context.Background(), bucketName)
6504
6505 if err != nil {
6506 logError(testName, function, args, startTime, "", "BucketExists failed", err)
6507 return
6508 }
6509 if !exists {
6510 logError(testName, function, args, startTime, "", "Could not find the bucket", err)
6511 return
6512 }
6513
6514 // Asserting the default bucket policy.
6515 function = "GetBucketPolicy(ctx, bucketName)"
6516 functionAll += ", " + function
6517 args = map[string]interface{}{
6518 "bucketName": bucketName,
6519 }
6520 nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName)
6521 if err != nil {
6522 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6523 return
6524 }
6525 if nilPolicy != "" {
6526 logError(testName, function, args, startTime, "", "policy should be set to nil", err)
6527 return
6528 }
6529
6530 // Set the bucket policy to 'public readonly'.
6531 function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
6532 functionAll += ", " + function
6533
6534 readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6535 args = map[string]interface{}{
6536 "bucketName": bucketName,
6537 "bucketPolicy": readOnlyPolicy,
6538 }
6539
6540 err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy)
6541 if err != nil {
6542 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6543 return
6544 }
6545 // should return policy `readonly`.
6546 function = "GetBucketPolicy(ctx, bucketName)"
6547 functionAll += ", " + function
6548 args = map[string]interface{}{
6549 "bucketName": bucketName,
6550 }
6551 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6552 if err != nil {
6553 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6554 return
6555 }
6556
6557 // Make the bucket 'public writeonly'.
6558 function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
6559 functionAll += ", " + function
6560
6561 writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6562 args = map[string]interface{}{
6563 "bucketName": bucketName,
6564 "bucketPolicy": writeOnlyPolicy,
6565 }
6566 err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy)
6567
6568 if err != nil {
6569 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6570 return
6571 }
6572 // should return policy `writeonly`.
6573 function = "GetBucketPolicy(ctx, bucketName)"
6574 functionAll += ", " + function
6575 args = map[string]interface{}{
6576 "bucketName": bucketName,
6577 }
6578
6579 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6580 if err != nil {
6581 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6582 return
6583 }
6584
6585 // Make the bucket 'public read/write'.
6586 function = "SetBucketPolicy(bucketName, readWritePolicy)"
6587 functionAll += ", " + function
6588
6589 readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6590
6591 args = map[string]interface{}{
6592 "bucketName": bucketName,
6593 "bucketPolicy": readWritePolicy,
6594 }
6595 err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
6596
6597 if err != nil {
6598 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6599 return
6600 }
6601 // should return policy `readwrite`.
6602 function = "GetBucketPolicy(bucketName)"
6603 functionAll += ", " + function
6604 args = map[string]interface{}{
6605 "bucketName": bucketName,
6606 }
6607 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6608 if err != nil {
6609 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6610 return
6611 }
6612
6613 // List all buckets.
6614 function = "ListBuckets()"
6615 functionAll += ", " + function
6616 args = nil
6617 buckets, err := c.ListBuckets(context.Background())
6618
6619 	if err != nil {
6620 		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
6621 		return
6622 	}
6623 	if len(buckets) == 0 {
6624 		logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
6625 		return
6626 	}
6627
6628 // Verify if previously created bucket is listed in list buckets.
6629 bucketFound := false
6630 for _, bucket := range buckets {
6631 if bucket.Name == bucketName {
6632 bucketFound = true
6633 }
6634 }
6635
6636 // If bucket not found error out.
6637 if !bucketFound {
6638 logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
6639 return
6640 }
6641
6642 objectName := bucketName + "unique"
6643
6644 // Generate data
6645 buf := bytes.Repeat([]byte("f"), 1<<19)
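	// 1<<19 bytes = 512 KiB of data, small enough to go up in a single PUT.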
6646
6647 function = "PutObject(bucketName, objectName, reader, contentType)"
6648 functionAll += ", " + function
6649 args = map[string]interface{}{
6650 "bucketName": bucketName,
6651 "objectName": objectName,
6652 "contentType": "",
6653 }
6654
6655 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
6656 if err != nil {
6657 logError(testName, function, args, startTime, "", "PutObject failed", err)
6658 return
6659 }
6660
6661 args = map[string]interface{}{
6662 "bucketName": bucketName,
6663 "objectName": objectName + "-nolength",
6664 "contentType": "binary/octet-stream",
6665 }
6666
6667 _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
6668 if err != nil {
6669 logError(testName, function, args, startTime, "", "PutObject failed", err)
6670 return
6671 }
6672
6673 // Instantiate a done channel to close all listing.
6674 doneCh := make(chan struct{})
6675 defer close(doneCh)
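	// doneCh is a holdover from the legacy channel-based listing API; the
	// context-aware ListObjects calls below do not consume it.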
6676
6677 objFound := false
6678 isRecursive := true // Recursive is true.
6679
6680 function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
6681 functionAll += ", " + function
6682 args = map[string]interface{}{
6683 "bucketName": bucketName,
6684 "objectName": objectName,
6685 "isRecursive": isRecursive,
6686 }
6687
6688 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) {
6689 if obj.Key == objectName {
6690 objFound = true
6691 break
6692 }
6693 }
6694 if !objFound {
6695 logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
6696 return
6697 }
6698
6699 objFound = false
6700 isRecursive = true // Recursive is true.
6701 function = "ListObjects()"
6702 functionAll += ", " + function
6703 args = map[string]interface{}{
6704 "bucketName": bucketName,
6705 "objectName": objectName,
6706 "isRecursive": isRecursive,
6707 }
6708
6709 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) {
6710 if obj.Key == objectName {
6711 objFound = true
6712 break
6713 }
6714 }
6715 if !objFound {
6716 logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
6717 return
6718 }
6719
6720 	incompObjFound := false
6721
6722 function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
6723 functionAll += ", " + function
6724 args = map[string]interface{}{
6725 "bucketName": bucketName,
6726 "objectName": objectName,
6727 "isRecursive": isRecursive,
6728 }
6729
6730 for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
6731 if objIncompl.Key != "" {
6732 			incompObjFound = true
6733 break
6734 }
6735 }
6736 	if incompObjFound {
6737 logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
6738 return
6739 }
6740
6741 function = "GetObject(bucketName, objectName)"
6742 functionAll += ", " + function
6743 args = map[string]interface{}{
6744 "bucketName": bucketName,
6745 "objectName": objectName,
6746 }
6747 newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6748 if err != nil {
6749 logError(testName, function, args, startTime, "", "GetObject failed", err)
6750 return
6751 }
6752
6753 newReadBytes, err := io.ReadAll(newReader)
6754 if err != nil {
6755 logError(testName, function, args, startTime, "", "ReadAll failed", err)
6756 return
6757 }
6758
6759 if !bytes.Equal(newReadBytes, buf) {
6760 logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
6761 return
6762 }
6763 newReader.Close()
6764
6765 function = "FGetObject(bucketName, objectName, fileName)"
6766 functionAll += ", " + function
6767 args = map[string]interface{}{
6768 "bucketName": bucketName,
6769 "objectName": objectName,
6770 "fileName": fileName + "-f",
6771 }
6772 err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
6773
6774 if err != nil {
6775 logError(testName, function, args, startTime, "", "FGetObject failed", err)
6776 return
6777 }
6778
6779 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
6780 functionAll += ", " + function
6781 args = map[string]interface{}{
6782 "bucketName": bucketName,
6783 "objectName": "",
6784 "expires": 3600 * time.Second,
6785 }
6786 if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil {
6787 		logError(testName, function, args, startTime, "", "PresignedHeadObject unexpectedly succeeded with an empty object name", err)
6788 return
6789 }
6790
6791 // Generate presigned HEAD object url.
6792 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
6793 functionAll += ", " + function
6794 args = map[string]interface{}{
6795 "bucketName": bucketName,
6796 "objectName": objectName,
6797 "expires": 3600 * time.Second,
6798 }
6799 presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
6800 if err != nil {
6801 logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
6802 return
6803 }
6804
6805 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
6806 if err != nil {
6807 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
6808 return
6809 }
6810
6811 httpClient := &http.Client{
6812 		// Set a sensible timeout of 30 seconds to wait for response
6813 		// headers; the request is proactively canceled if no response
6814 		// arrives within that window.
6815 Timeout: 30 * time.Second,
6816 Transport: transport,
6817 }
6818
6819 req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
6820 if err != nil {
6821 logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err)
6822 return
6823 }
6824
6825 // Verify if presigned url works.
6826 resp, err := httpClient.Do(req)
6827 if err != nil {
6828 logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
6829 return
6830 }
6831 if resp.StatusCode != http.StatusOK {
6832 		logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err)
6833 return
6834 }
6835 if resp.Header.Get("ETag") == "" {
6836 logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
6837 return
6838 }
6839 resp.Body.Close()
6840
6841 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
6842 functionAll += ", " + function
6843 args = map[string]interface{}{
6844 "bucketName": bucketName,
6845 "objectName": "",
6846 "expires": 3600 * time.Second,
6847 }
6848 _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil)
6849 if err == nil {
6850 		logError(testName, function, args, startTime, "", "PresignedGetObject unexpectedly succeeded with an empty object name", err)
6851 return
6852 }
6853
6854 // Generate presigned GET object url.
6855 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
6856 functionAll += ", " + function
6857 args = map[string]interface{}{
6858 "bucketName": bucketName,
6859 "objectName": objectName,
6860 "expires": 3600 * time.Second,
6861 }
6862 presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
6863 if err != nil {
6864 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
6865 return
6866 }
6867
6868 // Verify if presigned url works.
6869 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
6870 if err != nil {
6871 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
6872 return
6873 }
6874
6875 resp, err = httpClient.Do(req)
6876 if err != nil {
6877 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6878 return
6879 }
6880 if resp.StatusCode != http.StatusOK {
6881 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err)
6882 return
6883 }
6884 newPresignedBytes, err := io.ReadAll(resp.Body)
6885 if err != nil {
6886 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6887 return
6888 }
6889 resp.Body.Close()
6890 if !bytes.Equal(newPresignedBytes, buf) {
6891 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6892 return
6893 }
6894
6895 // Set request parameters.
6896 reqParams := make(url.Values)
6897 reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
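	// response-content-disposition asks the server to override the
	// Content-Disposition header on the presigned GET response; this is
	// asserted against the response headers below.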
6898 args = map[string]interface{}{
6899 "bucketName": bucketName,
6900 "objectName": objectName,
6901 "expires": 3600 * time.Second,
6902 "reqParams": reqParams,
6903 }
6904 presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
6905
6906 if err != nil {
6907 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
6908 return
6909 }
6910
6911 // Verify if presigned url works.
6912 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
6913 if err != nil {
6914 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
6915 return
6916 }
6917
6918 resp, err = httpClient.Do(req)
6919 if err != nil {
6920 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6921 return
6922 }
6923 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err)
6924 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
6925 return
6926 }
6927 newPresignedBytes, err = io.ReadAll(resp.Body)
6928 if err != nil {
6929 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6930 return
6931 }
6932 if !bytes.Equal(newPresignedBytes, buf) {
6933 logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
6934 return
6935 }
6936 if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
6937 		logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err)
6938 return
6939 }
6940
6941 function = "PresignedPutObject(bucketName, objectName, expires)"
6942 functionAll += ", " + function
6943 args = map[string]interface{}{
6944 "bucketName": bucketName,
6945 "objectName": "",
6946 "expires": 3600 * time.Second,
6947 }
6948 _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
6949 if err == nil {
6950 		logError(testName, function, args, startTime, "", "PresignedPutObject unexpectedly succeeded with an empty object name", err)
6951 return
6952 }
6953
6954 function = "PresignedPutObject(bucketName, objectName, expires)"
6955 functionAll += ", " + function
6956 args = map[string]interface{}{
6957 "bucketName": bucketName,
6958 "objectName": objectName + "-presigned",
6959 "expires": 3600 * time.Second,
6960 }
6961 presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
6962 if err != nil {
6963 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
6964 return
6965 }
6966
6967 buf = bytes.Repeat([]byte("g"), 1<<19)
6968
6969 req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
6970 if err != nil {
6971 logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
6972 return
6973 }
6974
6975 resp, err = httpClient.Do(req)
6976 if err != nil {
6977 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
6978 return
6979 }
6980
6981 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
6982 if err != nil {
6983 logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
6984 return
6985 }
6986
6987 newReadBytes, err = io.ReadAll(newReader)
6988 if err != nil {
6989 logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
6990 return
6991 }
6992
6993 if !bytes.Equal(newReadBytes, buf) {
6994 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
6995 return
6996 }
6997
6998 function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
6999 functionAll += ", " + function
7000 presignExtraHeaders := map[string][]string{
7001 "mysecret": {"abcxxx"},
7002 }
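	// Extra headers are baked into the signature, so the eventual HTTP
	// request must send the identical "mysecret" header (added below) or
	// the server rejects the signature.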
7003 args = map[string]interface{}{
7004 "method": "PUT",
7005 "bucketName": bucketName,
7006 "objectName": objectName + "-presign-custom",
7007 "expires": 3600 * time.Second,
7008 "extraHeaders": presignExtraHeaders,
7009 }
7010 presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
7011 if err != nil {
7012 logError(testName, function, args, startTime, "", "Presigned failed", err)
7013 return
7014 }
7015
7016 	// Generate more than 32 KiB of data.
7017 buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
7018
7019 req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf))
7020 if err != nil {
7021 logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
7022 return
7023 }
7024
7025 req.Header.Add("mysecret", "abcxxx")
7026 resp, err = httpClient.Do(req)
7027 if err != nil {
7028 logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
7029 return
7030 }
7031
7032 // Download the uploaded object to verify
7033 args = map[string]interface{}{
7034 "bucketName": bucketName,
7035 "objectName": objectName + "-presign-custom",
7036 }
7037 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{})
7038 if err != nil {
7039 logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err)
7040 return
7041 }
7042
7043 newReadBytes, err = io.ReadAll(newReader)
7044 if err != nil {
7045 logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
7046 return
7047 }
7048 newReader.Close()
7049
7050 if !bytes.Equal(newReadBytes, buf) {
7051 logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err)
7052 return
7053 }
7054
7055 function = "RemoveObject(bucketName, objectName)"
7056 functionAll += ", " + function
7057 args = map[string]interface{}{
7058 "bucketName": bucketName,
7059 "objectName": objectName,
7060 }
7061 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
7062
7063 if err != nil {
7064 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7065 return
7066 }
7067 args["objectName"] = objectName + "-f"
7068 err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{})
7069
7070 if err != nil {
7071 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7072 return
7073 }
7074
7075 args["objectName"] = objectName + "-nolength"
7076 err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{})
7077
7078 if err != nil {
7079 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7080 return
7081 }
7082
7083 args["objectName"] = objectName + "-presigned"
7084 err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{})
7085
7086 if err != nil {
7087 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7088 return
7089 }
7090
7091 args["objectName"] = objectName + "-presign-custom"
7092 err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
7093
7094 if err != nil {
7095 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7096 return
7097 }
7098
7099 function = "RemoveBucket(bucketName)"
7100 functionAll += ", " + function
7101 args = map[string]interface{}{
7102 "bucketName": bucketName,
7103 }
7104 err = c.RemoveBucket(context.Background(), bucketName)
7105
7106 if err != nil {
7107 logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
7108 return
7109 }
7110 err = c.RemoveBucket(context.Background(), bucketName)
7111 if err == nil {
7112 logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
7113 return
7114 }
7115 if err.Error() != "The specified bucket does not exist" {
7116 logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
7117 return
7118 }
7119
7120 os.Remove(fileName)
7121 os.Remove(fileName + "-f")
7122 successLogger(testName, functionAll, args, startTime).Info()
7123}
7124
7125 // Tests that the GetObject Reader* methods fail with a precondition
7126 // error when the object is modified in the object store mid-read.
7127func testGetObjectModified() {
7128 // initialize logging params
7129 startTime := time.Now()
7130 testName := getFuncName()
7131 function := "GetObject(bucketName, objectName)"
7132 args := map[string]interface{}{}
7133
7134 // Instantiate new minio client object.
7135 c, err := minio.New(os.Getenv(serverEndpoint),
7136 &minio.Options{
7137 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7138 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7139 })
7140 if err != nil {
7141 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
7142 return
7143 }
7144
7145 // Enable tracing, write to stderr.
7146 // c.TraceOn(os.Stderr)
7147
7148 // Set user agent.
7149 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7150
7151 // Make a new bucket.
7152 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7153 args["bucketName"] = bucketName
7154
7155 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7156 if err != nil {
7157 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7158 return
7159 }
7160
7161 defer cleanupBucket(bucketName, c)
7162
7163 // Upload an object.
7164 objectName := "myobject"
7165 args["objectName"] = objectName
7166 content := "helloworld"
7167 _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
7168 if err != nil {
7169 logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
7170 return
7171 }
7172
7173 defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
7174
7175 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7176 if err != nil {
7177 logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
7178 return
7179 }
7180 defer reader.Close()
7181
7182 // Read a few bytes of the object.
7183 b := make([]byte, 5)
7184 n, err := reader.ReadAt(b, 0)
7185 if err != nil {
7186 logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
7187 return
7188 }
7189
7190 // Upload different contents to the same object while object is being read.
7191 newContent := "goodbyeworld"
7192 _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
7193 if err != nil {
7194 logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
7195 return
7196 }
7197
7198 	// The reader cached the object's ETag on first read; after the overwrite above, Stat() must fail its If-Match precondition.
7199 _, err = reader.Stat()
7200 expectedError := "At least one of the pre-conditions you specified did not hold"
7201 	if err == nil || err.Error() != expectedError {
7202 		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected Stat to fail with error %q, but received %v", expectedError, err), err)
7203 return
7204 }
7205
7206 // Read again only to find object contents have been modified since last read.
7207 _, err = reader.ReadAt(b, int64(n))
7208 	if err == nil || err.Error() != expectedError {
7209 		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected ReadAt to fail with error %q, but received %v", expectedError, err), err)
7210 return
7211 }
7212
7213 successLogger(testName, function, args, startTime).Info()
7214}
7215
7216 // Tests that PutObject correctly uploads a file seeked to a given offset.
7217func testPutObjectUploadSeekedObject() {
7218 // initialize logging params
7219 startTime := time.Now()
7220 testName := getFuncName()
7221 function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
7222 args := map[string]interface{}{
7223 "bucketName": "",
7224 "objectName": "",
7225 "fileToUpload": "",
7226 "contentType": "binary/octet-stream",
7227 }
7228
7229 // Instantiate new minio client object.
7230 c, err := minio.New(os.Getenv(serverEndpoint),
7231 &minio.Options{
7232 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7233 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7234 })
7235 if err != nil {
7236 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
7237 return
7238 }
7239
7240 // Enable tracing, write to stderr.
7241 // c.TraceOn(os.Stderr)
7242
7243 // Set user agent.
7244 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7245
7246 // Make a new bucket.
7247 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7248 args["bucketName"] = bucketName
7249
7250 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7251 if err != nil {
7252 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7253 return
7254 }
7255 defer cleanupBucket(bucketName, c)
7256
7257 var tempfile *os.File
7258
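	// Prefer a pre-provisioned Mint data file when available; otherwise
	// synthesize a 100 kB temporary file.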
7259 if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
7260 tempfile, err = os.Open(fileName)
7261 if err != nil {
7262 logError(testName, function, args, startTime, "", "File open failed", err)
7263 return
7264 }
7265 args["fileToUpload"] = fileName
7266 } else {
7267 tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
7268 if err != nil {
7269 logError(testName, function, args, startTime, "", "TempFile create failed", err)
7270 return
7271 }
7272 args["fileToUpload"] = tempfile.Name()
7273
7274 // Generate 100kB data
7275 if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
7276 logError(testName, function, args, startTime, "", "File copy failed", err)
7277 return
7278 }
7279
7280 defer os.Remove(tempfile.Name())
7281
7282 // Seek back to the beginning of the file.
7283 tempfile.Seek(0, 0)
7284 }
7285 length := 100 * humanize.KiByte
7286 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
7287 args["objectName"] = objectName
7288
7289 offset := length / 2
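	// Seek to the midpoint before uploading, so the object should contain
	// only the trailing length-offset (50 KiB) bytes; verified via
	// StatObject below.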
7290 if _, err = tempfile.Seek(int64(offset), 0); err != nil {
7291 logError(testName, function, args, startTime, "", "TempFile seek failed", err)
7292 return
7293 }
7294
7295 _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7296 if err != nil {
7297 logError(testName, function, args, startTime, "", "PutObject failed", err)
7298 return
7299 }
7300 tempfile.Close()
7301
7302 obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7303 if err != nil {
7304 logError(testName, function, args, startTime, "", "GetObject failed", err)
7305 return
7306 }
7307 defer obj.Close()
7308
7309 n, err := obj.Seek(int64(offset), 0)
7310 if err != nil {
7311 logError(testName, function, args, startTime, "", "Seek failed", err)
7312 return
7313 }
7314 if n != int64(offset) {
7315 logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
7316 return
7317 }
7318
7319 _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7320 if err != nil {
7321 logError(testName, function, args, startTime, "", "PutObject failed", err)
7322 return
7323 }
7324 st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
7325 if err != nil {
7326 logError(testName, function, args, startTime, "", "StatObject failed", err)
7327 return
7328 }
7329 if st.Size != int64(length-offset) {
7330 		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size returned, expected %d got %d", int64(length-offset), st.Size), err)
7331 return
7332 }
7333
7334 successLogger(testName, function, args, startTime).Info()
7335}
7336
7337// Tests bucket re-create errors.
7338func testMakeBucketErrorV2() {
7339 // initialize logging params
7340 startTime := time.Now()
7341 testName := getFuncName()
7342 function := "MakeBucket(bucketName, region)"
7343 args := map[string]interface{}{
7344 "bucketName": "",
7345 "region": "eu-west-1",
7346 }
7347
7348 // Seed random based on current time.
7349 rand.Seed(time.Now().Unix())
7350
7351 // Instantiate new minio client object.
7352 c, err := minio.New(os.Getenv(serverEndpoint),
7353 &minio.Options{
7354 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7355 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7356 })
7357 if err != nil {
7358 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7359 return
7360 }
7361
7362 // Enable tracing, write to stderr.
7363 // c.TraceOn(os.Stderr)
7364
7365 // Set user agent.
7366 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7367
7368 // Generate a new random bucket name.
7369 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7370 region := "eu-west-1"
7371 args["bucketName"] = bucketName
7372 args["region"] = region
7373
7374 // Make a new bucket in 'eu-west-1'.
7375 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
7376 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7377 return
7378 }
7379
7380 defer cleanupBucket(bucketName, c)
7381
7382 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
7383 logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
7384 return
7385 }
7386 // Verify valid error response from server.
7387 if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
7388 minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
7389 logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
7390 return
7391 }
7392
7393 successLogger(testName, function, args, startTime).Info()
7394}
7395
7396// Test get object reader to not throw error on being closed twice.
7397func testGetObjectClosedTwiceV2() {
7398 // initialize logging params
7399 startTime := time.Now()
7400 testName := getFuncName()
7401 function := "MakeBucket(bucketName, region)"
7402 args := map[string]interface{}{
7403 "bucketName": "",
7404 "region": "eu-west-1",
7405 }
7406
7407 // Seed random based on current time.
7408 rand.Seed(time.Now().Unix())
7409
7410 // Instantiate new minio client object.
7411 c, err := minio.New(os.Getenv(serverEndpoint),
7412 &minio.Options{
7413 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7414 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7415 })
7416 if err != nil {
7417 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7418 return
7419 }
7420
7421 // Enable tracing, write to stderr.
7422 // c.TraceOn(os.Stderr)
7423
7424 // Set user agent.
7425 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7426
7427 // Generate a new random bucket name.
7428 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7429 args["bucketName"] = bucketName
7430
7431 // Make a new bucket.
7432 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7433 if err != nil {
7434 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7435 return
7436 }
7437
7438 defer cleanupBucket(bucketName, c)
7439
7440 // Generate 33K of data.
7441 bufSize := dataFileMap["datafile-33-kB"]
7442 reader := getDataReader("datafile-33-kB")
7443 defer reader.Close()
7444
7445 // Save the data
7446 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7447 args["objectName"] = objectName
7448
7449 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7450 if err != nil {
7451 logError(testName, function, args, startTime, "", "PutObject failed", err)
7452 return
7453 }
7454
7455 // Read the data back
7456 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7457 if err != nil {
7458 logError(testName, function, args, startTime, "", "GetObject failed", err)
7459 return
7460 }
7461
7462 st, err := r.Stat()
7463 if err != nil {
7464 logError(testName, function, args, startTime, "", "Stat failed", err)
7465 return
7466 }
7467
7468 if st.Size != int64(bufSize) {
7469 		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err)
7470 return
7471 }
7472 if err := r.Close(); err != nil {
7473 		logError(testName, function, args, startTime, "", "Object Close failed", err)
7474 return
7475 }
7476 if err := r.Close(); err == nil {
7477 logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
7478 return
7479 }
7480
7481 successLogger(testName, function, args, startTime).Info()
7482}
7483
7484 // Tests FPutObject's implicit content-type detection.
7485func testFPutObjectV2() {
7486 // initialize logging params
7487 startTime := time.Now()
7488 testName := getFuncName()
7489 function := "FPutObject(bucketName, objectName, fileName, opts)"
7490 args := map[string]interface{}{
7491 "bucketName": "",
7492 "objectName": "",
7493 "fileName": "",
7494 "opts": "",
7495 }
7496
7497 // Seed random based on current time.
7498 rand.Seed(time.Now().Unix())
7499
7500 // Instantiate new minio client object.
7501 c, err := minio.New(os.Getenv(serverEndpoint),
7502 &minio.Options{
7503 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7504 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7505 })
7506 if err != nil {
7507 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7508 return
7509 }
7510
7511 // Enable tracing, write to stderr.
7512 // c.TraceOn(os.Stderr)
7513
7514 // Set user agent.
7515 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7516
7517 // Generate a new random bucket name.
7518 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7519 args["bucketName"] = bucketName
7520
7521 // Make a new bucket.
7522 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7523 if err != nil {
7524 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7525 return
7526 }
7527
7528 defer cleanupBucket(bucketName, c)
7529
7530 // Make a temp file with 11*1024*1024 bytes of data.
7531 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
7532 if err != nil {
7533 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
7534 return
7535 }
7536
7537 r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
7538 n, err := io.CopyN(file, r, 11*1024*1024)
7539 if err != nil {
7540 logError(testName, function, args, startTime, "", "Copy failed", err)
7541 return
7542 }
7543 if n != int64(11*1024*1024) {
7544 		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", int64(11*1024*1024))+" got "+fmt.Sprintf("%d", n), err)
7545 return
7546 }
7547
7548 	// Close the file proactively, as required on Windows.
7549 err = file.Close()
7550 if err != nil {
7551 logError(testName, function, args, startTime, "", "File close failed", err)
7552 return
7553 }
7554
7555 // Set base object name
7556 objectName := bucketName + "FPutObject"
7557 args["objectName"] = objectName
7558 args["fileName"] = file.Name()
7559
7560 // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
7561 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
7562 if err != nil {
7563 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7564 return
7565 }
7566
7567 // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
7568 args["objectName"] = objectName + "-Octet"
7569 args["contentType"] = ""
7570
7571 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
7572 if err != nil {
7573 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7574 return
7575 }
7576
7577 // Add extension to temp file name
7578 fileName := file.Name()
7579 err = os.Rename(fileName, fileName+".gtar")
7580 if err != nil {
7581 logError(testName, function, args, startTime, "", "Rename failed", err)
7582 return
7583 }
7584
7585 // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
7586 	args["objectName"] = objectName + "-GTar"
7587 args["contentType"] = ""
7588 args["fileName"] = fileName + ".gtar"
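	// With no ContentType option, FPutObject infers the type from the
	// ".gtar" extension; the exact value can vary with the platform MIME
	// database, hence the multi-value check further down.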
7589
7590 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
7591 if err != nil {
7592 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7593 return
7594 }
7595
7596 // Check headers and sizes
7597 rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
7598 if err != nil {
7599 logError(testName, function, args, startTime, "", "StatObject failed", err)
7600 return
7601 }
7602
7603 if rStandard.Size != 11*1024*1024 {
7604 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7605 return
7606 }
7607
7608 if rStandard.ContentType != "application/octet-stream" {
7609 logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
7610 return
7611 }
7612
7613 rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
7614 if err != nil {
7615 logError(testName, function, args, startTime, "", "StatObject failed", err)
7616 return
7617 }
7618 if rOctet.ContentType != "application/octet-stream" {
7619 logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
7620 return
7621 }
7622
7623 if rOctet.Size != 11*1024*1024 {
7624 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7625 return
7626 }
7627
7628 rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
7629 if err != nil {
7630 logError(testName, function, args, startTime, "", "StatObject failed", err)
7631 return
7632 }
7633 if rGTar.Size != 11*1024*1024 {
7634 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7635 return
7636 }
7637 if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
7638 		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected one of application/x-gtar, application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
7639 return
7640 }
7641
7642 os.Remove(fileName + ".gtar")
7643 successLogger(testName, function, args, startTime).Info()
7644}
7645
7646// Tests various bucket supported formats.
7647func testMakeBucketRegionsV2() {
7648 // initialize logging params
7649 startTime := time.Now()
7650 testName := getFuncName()
7651 function := "MakeBucket(bucketName, region)"
7652 args := map[string]interface{}{
7653 "bucketName": "",
7654 "region": "eu-west-1",
7655 }
7656
7657 // Seed random based on current time.
7658 rand.Seed(time.Now().Unix())
7659
7660 // Instantiate new minio client object.
7661 c, err := minio.New(os.Getenv(serverEndpoint),
7662 &minio.Options{
7663 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7664 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7665 })
7666 if err != nil {
7667 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7668 return
7669 }
7670
7671 // Enable tracing, write to stderr.
7672 // c.TraceOn(os.Stderr)
7673
7674 // Set user agent.
7675 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7676
7677 // Generate a new random bucket name.
7678 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7679 args["bucketName"] = bucketName
7680
7681 	// Make a new bucket in 'eu-west-1'.
7682 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
7683 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7684 return
7685 }
7686
7687 if err = cleanupBucket(bucketName, c); err != nil {
7688 logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
7689 return
7690 }
7691
7692 	// Make a new bucket with '.' in its name, in 'us-west-2'. This
7693 	// request is internally routed using path-style addressing instead
7694 	// of virtual-host style.
7695 if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
7696 args["bucketName"] = bucketName + ".withperiod"
7697 args["region"] = "us-west-2"
7698 logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
7699 return
7700 }
7701
7702 // Delete all objects and buckets
7703 if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
7704 logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
7705 return
7706 }
7707
7708 successLogger(testName, function, args, startTime).Info()
7709}
7710
7711// Tests get object ReaderSeeker interface methods.
7712func testGetObjectReadSeekFunctionalV2() {
7713 // initialize logging params
7714 startTime := time.Now()
7715 testName := getFuncName()
7716 function := "GetObject(bucketName, objectName)"
7717 args := map[string]interface{}{}
7718
7719 // Seed random based on current time.
7720 rand.Seed(time.Now().Unix())
7721
7722 // Instantiate new minio client object.
7723 c, err := minio.New(os.Getenv(serverEndpoint),
7724 &minio.Options{
7725 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7726 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7727 })
7728 if err != nil {
7729 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7730 return
7731 }
7732
7733 // Enable tracing, write to stderr.
7734 // c.TraceOn(os.Stderr)
7735
7736 // Set user agent.
7737 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7738
7739 // Generate a new random bucket name.
7740 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7741 args["bucketName"] = bucketName
7742
7743 // Make a new bucket.
7744 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7745 if err != nil {
7746 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7747 return
7748 }
7749
7750 defer cleanupBucket(bucketName, c)
7751
7752 // Generate 33K of data.
7753 bufSize := dataFileMap["datafile-33-kB"]
7754 reader := getDataReader("datafile-33-kB")
7755 defer reader.Close()
7756
7757 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7758 args["objectName"] = objectName
7759
7760 buf, err := io.ReadAll(reader)
7761 if err != nil {
7762 logError(testName, function, args, startTime, "", "ReadAll failed", err)
7763 return
7764 }
7765
7766 // Save the data.
7767 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7768 if err != nil {
7769 logError(testName, function, args, startTime, "", "PutObject failed", err)
7770 return
7771 }
7772
7773 // Read the data back
7774 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7775 if err != nil {
7776 logError(testName, function, args, startTime, "", "GetObject failed", err)
7777 return
7778 }
7779 defer r.Close()
7780
7781 st, err := r.Stat()
7782 if err != nil {
7783 logError(testName, function, args, startTime, "", "Stat failed", err)
7784 return
7785 }
7786
7787 if st.Size != int64(bufSize) {
7788 		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err)
7789 return
7790 }
7791
7792 offset := int64(2048)
7793 n, err := r.Seek(offset, 0)
7794 if err != nil {
7795 logError(testName, function, args, startTime, "", "Seek failed", err)
7796 return
7797 }
7798 if n != offset {
7799 		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err)
7800 return
7801 }
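	// Seek(0, 1) (io.SeekCurrent) moves nothing and reports the current
	// absolute offset.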
7802 n, err = r.Seek(0, 1)
7803 if err != nil {
7804 logError(testName, function, args, startTime, "", "Seek failed", err)
7805 return
7806 }
7807 if n != offset {
7808 		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err)
7809 return
7810 }
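	// With whence 2 (io.SeekEnd), a positive offset would land past EOF
	// and must be rejected.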
7811 _, err = r.Seek(offset, 2)
7812 if err == nil {
7813 logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
7814 return
7815 }
7816 n, err = r.Seek(-offset, 2)
7817 if err != nil {
7818 logError(testName, function, args, startTime, "", "Seek failed", err)
7819 return
7820 }
7821 if n != st.Size-offset {
7822 		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", st.Size-offset)+" got "+fmt.Sprintf("%d", n), err)
7823 return
7824 }
7825
7826 var buffer1 bytes.Buffer
7827 if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
7828 if err != io.EOF {
7829 logError(testName, function, args, startTime, "", "Copy failed", err)
7830 return
7831 }
7832 }
7833 if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
7834 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
7835 return
7836 }
7837
7838 // Seek again and read again.
7839 n, err = r.Seek(offset-1, 0)
7840 if err != nil {
7841 logError(testName, function, args, startTime, "", "Seek failed", err)
7842 return
7843 }
7844 if n != (offset - 1) {
7845 		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset-1)+" got "+fmt.Sprintf("%d", n), err)
7846 return
7847 }
7848
7849 var buffer2 bytes.Buffer
7850 if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
7851 if err != io.EOF {
7852 logError(testName, function, args, startTime, "", "Copy failed", err)
7853 return
7854 }
7855 }
7856 	// Verify the remaining bytes from offset-1 match the source buffer.
7857 if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
7858 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
7859 return
7860 }
7861
7862 successLogger(testName, function, args, startTime).Info()
7863}
7864
7865// Tests get object ReaderAt interface methods.
7866func testGetObjectReadAtFunctionalV2() {
7867 // initialize logging params
7868 startTime := time.Now()
7869 testName := getFuncName()
7870 function := "GetObject(bucketName, objectName)"
7871 args := map[string]interface{}{}
7872
7873 // Seed random based on current time.
7874 rand.Seed(time.Now().Unix())
7875
7876 // Instantiate new minio client object.
7877 c, err := minio.New(os.Getenv(serverEndpoint),
7878 &minio.Options{
7879 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7880 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7881 })
7882 if err != nil {
7883 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7884 return
7885 }
7886
7887 // Enable tracing, write to stderr.
7888 // c.TraceOn(os.Stderr)
7889
7890 // Set user agent.
7891 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7892
7893 // Generate a new random bucket name.
7894 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7895 args["bucketName"] = bucketName
7896
7897 // Make a new bucket.
7898 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7899 if err != nil {
7900 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7901 return
7902 }
7903
7904 defer cleanupBucket(bucketName, c)
7905
7906 // Generate 33K of data.
7907 bufSize := dataFileMap["datafile-33-kB"]
7908 reader := getDataReader("datafile-33-kB")
7909 defer reader.Close()
7910
7911 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7912 args["objectName"] = objectName
7913
7914 buf, err := io.ReadAll(reader)
7915 if err != nil {
7916 logError(testName, function, args, startTime, "", "ReadAll failed", err)
7917 return
7918 }
7919
7920 // Save the data
7921 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7922 if err != nil {
7923 logError(testName, function, args, startTime, "", "PutObject failed", err)
7924 return
7925 }
7926
7927 // Read the data back
7928 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7929 if err != nil {
7930 logError(testName, function, args, startTime, "", "GetObject failed", err)
7931 return
7932 }
7933 defer r.Close()
7934
7935 st, err := r.Stat()
7936 if err != nil {
7937 logError(testName, function, args, startTime, "", "Stat failed", err)
7938 return
7939 }
7940
7941 if st.Size != int64(bufSize) {
7942 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
7943 return
7944 }
7945
7946 offset := int64(2048)
7947
7948 // Read directly
7949 buf2 := make([]byte, 512)
7950 buf3 := make([]byte, 512)
7951 buf4 := make([]byte, 512)
7952
7953 m, err := r.ReadAt(buf2, offset)
7954 if err != nil {
7955 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7956 return
7957 }
7958 if m != len(buf2) {
7959 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf2), m), err)
7960 return
7961 }
7962 if !bytes.Equal(buf2, buf[offset:offset+512]) {
7963 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
7964 return
7965 }
7966 offset += 512
7967 m, err = r.ReadAt(buf3, offset)
7968 if err != nil {
7969 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7970 return
7971 }
7972 if m != len(buf3) {
7973 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf3), m), err)
7974 return
7975 }
7976 if !bytes.Equal(buf3, buf[offset:offset+512]) {
7977 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
7978 return
7979 }
7980 offset += 512
7981 m, err = r.ReadAt(buf4, offset)
7982 if err != nil {
7983 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7984 return
7985 }
7986 if m != len(buf4) {
7987 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf4), m), err)
7988 return
7989 }
7990 if !bytes.Equal(buf4, buf[offset:offset+512]) {
7991 logError(testName, function, args, startTime, "", "ReadAt data does not match the original buffer at this offset", err)
7992 return
7993 }
7994
7995 buf5 := make([]byte, bufSize)
7996 // Read the whole object.
7997 m, err = r.ReadAt(buf5, 0)
7998 if err != nil {
7999 if err != io.EOF {
8000 logError(testName, function, args, startTime, "", "ReadAt failed", err)
8001 return
8002 }
8003 }
8004 if m != len(buf5) {
8005 logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf5), m), err)
8006 return
8007 }
8008 if !bytes.Equal(buf, buf5) {
8009 logError(testName, function, args, startTime, "", "Data read via GetObject does not match what was previously uploaded", err)
8010 return
8011 }
8012
8013 buf6 := make([]byte, bufSize+1)
8014 // Read the whole object and beyond.
8015 _, err = r.ReadAt(buf6, 0)
8016 if err != nil {
8017 if err != io.EOF {
8018 logError(testName, function, args, startTime, "", "ReadAt failed", err)
8019 return
8020 }
8021 }
8022
8023 successLogger(testName, function, args, startTime).Info()
8024}
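
// Editorial sketch (hypothetical, not part of the upstream test suite): the
// ReadAt checks above rely on the io.ReaderAt contract: when the buffer
// extends past the end of the data, ReadAt returns the bytes it could read
// together with io.EOF. A minimal in-memory demonstration:
func exampleReadAtContract() {
	data := []byte("0123456789")
	r := bytes.NewReader(data)

	// Buffer fits entirely inside the data: full read, nil error.
	buf := make([]byte, 4)
	n, err := r.ReadAt(buf, 2)
	fmt.Println(n, err) // 4 <nil>

	// Buffer extends past the end: partial read plus io.EOF.
	n, err = r.ReadAt(buf, 8)
	fmt.Println(n, err == io.EOF) // 2 true
}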
8025
8026// Tests copy object
8027func testCopyObjectV2() {
8028 // initialize logging params
8029 startTime := time.Now()
8030 testName := getFuncName()
8031 function := "CopyObject(destination, source)"
8032 args := map[string]interface{}{}
8033
8034 // Seed random based on current time.
8035 rand.Seed(time.Now().Unix())
8036
8037 // Instantiate new minio client object
8038 c, err := minio.New(os.Getenv(serverEndpoint),
8039 &minio.Options{
8040 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8041 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8042 })
8043 if err != nil {
8044 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8045 return
8046 }
8047
8048 // Enable tracing, write to stderr.
8049 // c.TraceOn(os.Stderr)
8050
8051 // Set user agent.
8052 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
8053
8054 // Generate a new random bucket name.
8055 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8056
8057 // Make a new bucket in 'us-east-1' (source bucket).
8058 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8059 if err != nil {
8060 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8061 return
8062 }
8063 defer cleanupBucket(bucketName, c)
8064
8065 // Make a new bucket in 'us-east-1' (destination bucket).
8066 err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
8067 if err != nil {
8068 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8069 return
8070 }
8071 defer cleanupBucket(bucketName+"-copy", c)
8072
8073 // Generate 33K of data.
8074 bufSize := dataFileMap["datafile-33-kB"]
8075 reader := getDataReader("datafile-33-kB")
8076 defer reader.Close()
8077
8078 // Save the data
8079 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
8080 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
8081 if err != nil {
8082 logError(testName, function, args, startTime, "", "PutObject failed", err)
8083 return
8084 }
8085
8086 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
8087 if err != nil {
8088 logError(testName, function, args, startTime, "", "GetObject failed", err)
8089 return
8090 }
8091 // Check the various fields of source object against destination object.
8092 objInfo, err := r.Stat()
8093 if err != nil {
8094 logError(testName, function, args, startTime, "", "Stat failed", err)
8095 return
8096 }
8097 r.Close()
8098
8099 // Copy Source
8100 src := minio.CopySrcOptions{
8101 Bucket: bucketName,
8102 Object: objectName,
8103 MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
8104 MatchETag: objInfo.ETag,
8105 }
8106 args["source"] = src
8107
8108 // Set copy conditions.
8109 dst := minio.CopyDestOptions{
8110 Bucket: bucketName + "-copy",
8111 Object: objectName + "-copy",
8112 }
8113 args["destination"] = dst
8114
8115 // Perform the Copy
8116 _, err = c.CopyObject(context.Background(), dst, src)
8117 if err != nil {
8118 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8119 return
8120 }
8121
8122 // Source object
8123 r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
8124 if err != nil {
8125 logError(testName, function, args, startTime, "", "GetObject failed", err)
8126 return
8127 }
8128 // Destination object
8129 readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
8130 if err != nil {
8131 logError(testName, function, args, startTime, "", "GetObject failed", err)
8132 return
8133 }
8134 // Check the various fields of source object against destination object.
8135 objInfo, err = r.Stat()
8136 if err != nil {
8137 logError(testName, function, args, startTime, "", "Stat failed", err)
8138 return
8139 }
8140 objInfoCopy, err := readerCopy.Stat()
8141 if err != nil {
8142 logError(testName, function, args, startTime, "", "Stat failed", err)
8143 return
8144 }
8145 if objInfo.Size != objInfoCopy.Size {
8146 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
8147 return
8148 }
8149
8150 // Close all the readers.
8151 r.Close()
8152 readerCopy.Close()
8153
8154 // CopyObject again but with wrong conditions
8155 src = minio.CopySrcOptions{
8156 Bucket: bucketName,
8157 Object: objectName,
8158 MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
8159 NoMatchETag: objInfo.ETag,
8160 }
8161
8162 // Perform the Copy which should fail
8163 _, err = c.CopyObject(context.Background(), dst, src)
8164 if err == nil {
8165 logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
8166 return
8167 }
8168
8169 successLogger(testName, function, args, startTime).Info()
8170}
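
// Editorial sketch (hypothetical helper): the smallest possible server-side
// copy, without the conditional headers exercised above. Bucket and object
// names are placeholders supplied by the caller.
func exampleSimpleCopy(c *minio.Client, srcBucket, srcObject, dstBucket, dstObject string) error {
	src := minio.CopySrcOptions{Bucket: srcBucket, Object: srcObject}
	dst := minio.CopyDestOptions{Bucket: dstBucket, Object: dstObject}
	_, err := c.CopyObject(context.Background(), dst, src)
	return err
}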
8171
8172func testComposeObjectErrorCasesWrapper(c *minio.Client) {
8173 // initialize logging params
8174 startTime := time.Now()
8175 testName := getFuncName()
8176 function := "ComposeObject(destination, sourceList)"
8177 args := map[string]interface{}{}
8178
8179 // Generate a new random bucket name.
8180 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8181
8182 // Make a new bucket in 'us-east-1' (source bucket).
8183 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8184 if err != nil {
8185 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8186 return
8187 }
8188
8189 defer cleanupBucket(bucketName, c)
8190
8191 // Test that more than 10K source objects cannot be
8192 // concatenated.
8193 srcArr := [10001]minio.CopySrcOptions{}
8194 srcSlice := srcArr[:]
8195 dst := minio.CopyDestOptions{
8196 Bucket: bucketName,
8197 Object: "object",
8198 }
8199
8200 args["destination"] = dst
8201 // Describe srcArr in args["sourceList"] instead of embedding it,
8202 // so the log does not contain 10,001 empty source entries
8203 args["sourceList"] = "source array of 10,001 elements"
8204 if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
8205 logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
8206 return
8207 } else if err.Error() != "There must be as least one and up to 10000 source objects." { // "as least" (sic): compared verbatim against the library's error string
8208 logError(testName, function, args, startTime, "", "Got unexpected error", err)
8209 return
8210 }
8211
8212 // Create a source with invalid offset spec and check that
8213 // error is returned:
8214 // 1. Create the source object.
8215 const badSrcSize = 5 * 1024 * 1024
8216 buf := bytes.Repeat([]byte("1"), badSrcSize)
8217 _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
8218 if err != nil {
8219 logError(testName, function, args, startTime, "", "PutObject failed", err)
8220 return
8221 }
8222 // 2. Set invalid range spec on the object (going beyond
8223 // object size)
8224 badSrc := minio.CopySrcOptions{
8225 Bucket: bucketName,
8226 Object: "badObject",
8227 MatchRange: true,
8228 Start: 1,
8229 End: badSrcSize,
8230 }
8231
8232 // 3. ComposeObject call should fail.
8233 if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil {
8234 logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
8235 return
8236 } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
8237 logError(testName, function, args, startTime, "", "Got invalid error", err)
8238 return
8239 }
8240
8241 successLogger(testName, function, args, startTime).Info()
8242}
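
// Editorial sketch (hypothetical helper): a valid segment-to-copy, for
// contrast with badSrc above. Start and End are inclusive byte offsets, so
// End may be at most size-1; badSrc fails because End == badSrcSize points
// one byte past the end of the object.
func exampleValidRangeSrc(bucket, object string, size int64) minio.CopySrcOptions {
	return minio.CopySrcOptions{
		Bucket:     bucket,
		Object:     object,
		MatchRange: true,
		Start:      0,
		End:        size - 1, // inclusive offset of the last byte
	}
}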
8243
8244// Test expected error cases
8245func testComposeObjectErrorCasesV2() {
8246 // initialize logging params
8247 startTime := time.Now()
8248 testName := getFuncName()
8249 function := "ComposeObject(destination, sourceList)"
8250 args := map[string]interface{}{}
8251
8252 // Instantiate new minio client object
8253 c, err := minio.New(os.Getenv(serverEndpoint),
8254 &minio.Options{
8255 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8256 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8257 })
8258 if err != nil {
8259 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8260 return
8261 }
8262
8263 testComposeObjectErrorCasesWrapper(c)
8264}
8265
8266func testComposeMultipleSources(c *minio.Client) {
8267 // initialize logging params
8268 startTime := time.Now()
8269 testName := getFuncName()
8270 function := "ComposeObject(destination, sourceList)"
8271 args := map[string]interface{}{
8272 "destination": "",
8273 "sourceList": "",
8274 }
8275
8276 // Generate a new random bucket name.
8277 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8278 // Make a new bucket in 'us-east-1' (source bucket).
8279 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8280 if err != nil {
8281 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8282 return
8283 }
8284
8285 defer cleanupBucket(bucketName, c)
8286
8287 // Upload a small source object
8288 const srcSize = 1024 * 1024 * 5
8289 buf := bytes.Repeat([]byte("1"), srcSize)
8290 _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
8291 if err != nil {
8292 logError(testName, function, args, startTime, "", "PutObject failed", err)
8293 return
8294 }
8295
8296 // We will append 10 copies of the object.
8297 srcs := []minio.CopySrcOptions{}
8298 for i := 0; i < 10; i++ {
8299 srcs = append(srcs, minio.CopySrcOptions{
8300 Bucket: bucketName,
8301 Object: "srcObject",
8302 })
8303 }
8304
8305 // Make the last part very small: MatchRange with zero-valued Start/End selects the inclusive range [0, 0], i.e. a single byte
8306 srcs[9].MatchRange = true
8307
8308 args["sourceList"] = srcs
8309
8310 dst := minio.CopyDestOptions{
8311 Bucket: bucketName,
8312 Object: "dstObject",
8313 }
8314 args["destination"] = dst
8315
8316 ui, err := c.ComposeObject(context.Background(), dst, srcs...)
8317 if err != nil {
8318 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
8319 return
8320 }
8321
8322 if ui.Size != 9*srcSize+1 {
8323 logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
8324 return
8325 }
8326
8327 objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
8328 if err != nil {
8329 logError(testName, function, args, startTime, "", "StatObject failed", err)
8330 return
8331 }
8332
8333 if objProps.Size != 9*srcSize+1 {
8334 logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
8335 return
8336 }
8337
8338 successLogger(testName, function, args, startTime).Info()
8339}
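
// Editorial sketch (hypothetical helper): why the expected size above is
// 9*srcSize+1. Nine sources are copied whole; the tenth has MatchRange set
// with zero-valued Start/End, which selects the inclusive range [0, 0] and
// therefore contributes exactly one byte.
func expectedComposeSize(fullCopies int, srcSize, rangedBytes int64) int64 {
	return int64(fullCopies)*srcSize + rangedBytes // here: 9*srcSize + 1
}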
8340
8341// Test concatenating multiple 10K objects V2
8342func testCompose10KSourcesV2() {
8343 // initialize logging params
8344 startTime := time.Now()
8345 testName := getFuncName()
8346 function := "ComposeObject(destination, sourceList)"
8347 args := map[string]interface{}{}
8348
8349 // Instantiate new minio client object
8350 c, err := minio.New(os.Getenv(serverEndpoint),
8351 &minio.Options{
8352 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8353 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8354 })
8355 if err != nil {
8356 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8357 return
8358 }
8359
8360 testComposeMultipleSources(c)
8361}
8362
8363func testEncryptedEmptyObject() {
8364 // initialize logging params
8365 startTime := time.Now()
8366 testName := getFuncName()
8367 function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
8368 args := map[string]interface{}{}
8369
8370 // Instantiate new minio client object
8371 c, err := minio.New(os.Getenv(serverEndpoint),
8372 &minio.Options{
8373 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8374 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8375 })
8376 if err != nil {
8377 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8378 return
8379 }
8380
8381 // Generate a new random bucket name.
8382 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8383 args["bucketName"] = bucketName
8384 // Make a new bucket in 'us-east-1' (source bucket).
8385 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8386 if err != nil {
8387 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8388 return
8389 }
8390
8391 defer cleanupBucket(bucketName, c)
8392
8393 sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
8394
8395 // 1. create an sse-c encrypted object to copy by uploading
8396 const srcSize = 0
8397 var buf []byte // Empty buffer
8398 args["objectName"] = "object"
8399 _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
8400 if err != nil {
8401 logError(testName, function, args, startTime, "", "PutObject call failed", err)
8402 return
8403 }
8404
8405 // 2. Test CopyObject for an empty object
8406 src := minio.CopySrcOptions{
8407 Bucket: bucketName,
8408 Object: "object",
8409 Encryption: sse,
8410 }
8411
8412 dst := minio.CopyDestOptions{
8413 Bucket: bucketName,
8414 Object: "new-object",
8415 Encryption: sse,
8416 }
8417
8418 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
8419 function = "CopyObject(dst, src)"
8420 logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
8421 return
8422 }
8423
8424 // 3. Test Key rotation
8425 newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
8426 src = minio.CopySrcOptions{
8427 Bucket: bucketName,
8428 Object: "new-object",
8429 Encryption: sse,
8430 }
8431
8432 dst = minio.CopyDestOptions{
8433 Bucket: bucketName,
8434 Object: "new-object",
8435 Encryption: newSSE,
8436 }
8437
8438 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
8439 function = "CopyObject(dst, src)"
8440 logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
8441 return
8442 }
8443
8444 // 4. Download the object.
8445 reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
8446 if err != nil {
8447 logError(testName, function, args, startTime, "", "GetObject failed", err)
8448 return
8449 }
8450 defer reader.Close()
8451
8452 decBytes, err := io.ReadAll(reader)
8453 if err != nil {
8454 logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
8455 return
8456 }
8457 if !bytes.Equal(decBytes, buf) {
8458 logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
8459 return
8460 }
8461
8462 delete(args, "objectName")
8463 successLogger(testName, function, args, startTime).Info()
8464}
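
// Editorial sketch (hypothetical helper) of the key-rotation pattern used
// above: copy an object onto itself, presenting the old SSE-C key on the
// source side and the new key on the destination side.
func exampleRotateSSECKey(c *minio.Client, bucket, object string, oldKey, newKey encrypt.ServerSide) error {
	src := minio.CopySrcOptions{Bucket: bucket, Object: object, Encryption: oldKey}
	dst := minio.CopyDestOptions{Bucket: bucket, Object: object, Encryption: newKey}
	_, err := c.CopyObject(context.Background(), dst, src)
	return err
}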
8465
8466func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) {
8467 // initialize logging params
8468 startTime := time.Now()
8469 testName := getFuncNameLoc(2)
8470 function := "CopyObject(destination, source)"
8471 args := map[string]interface{}{}
8472 var srcEncryption, dstEncryption encrypt.ServerSide
8473
8474 // Make a new bucket in 'us-east-1' (source bucket).
8475 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8476 if err != nil {
8477 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8478 return
8479 }
8480
8481 defer cleanupBucket(bucketName, c)
8482
8483 // 1. create an sse-c encrypted object to copy by uploading
8484 const srcSize = 1024 * 1024
8485 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
8486 _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
8487 ServerSideEncryption: sseSrc,
8488 })
8489 if err != nil {
8490 logError(testName, function, args, startTime, "", "PutObject call failed", err)
8491 return
8492 }
8493
8494 if sseSrc != nil && sseSrc.Type() != encrypt.S3 {
8495 srcEncryption = sseSrc
8496 }
8497
8498 // 2. copy object and change encryption key
8499 src := minio.CopySrcOptions{
8500 Bucket: bucketName,
8501 Object: "srcObject",
8502 Encryption: srcEncryption,
8503 }
8504 args["source"] = src
8505
8506 dst := minio.CopyDestOptions{
8507 Bucket: bucketName,
8508 Object: "dstObject",
8509 Encryption: sseDst,
8510 }
8511 args["destination"] = dst
8512
8513 _, err = c.CopyObject(context.Background(), dst, src)
8514 if err != nil {
8515 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8516 return
8517 }
8518
8519 if sseDst != nil && sseDst.Type() != encrypt.S3 {
8520 dstEncryption = sseDst
8521 }
8522 // 3. get copied object and check if content is equal
8523 coreClient := minio.Core{Client: c}
8524 reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
8525 if err != nil {
8526 logError(testName, function, args, startTime, "", "GetObject failed", err)
8527 return
8528 }
8529
8530 decBytes, err := io.ReadAll(reader)
8531 if err != nil {
8532 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8533 return
8534 }
8535 if !bytes.Equal(decBytes, buf) {
8536 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8537 return
8538 }
8539 reader.Close()
8540
8541 // Test key rotation for source object in-place.
8542 var newSSE encrypt.ServerSide
8543 if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
8544 newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
8545 }
8546 if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
8547 newSSE = encrypt.NewSSE()
8548 }
8549 if newSSE != nil {
8550 dst = minio.CopyDestOptions{
8551 Bucket: bucketName,
8552 Object: "srcObject",
8553 Encryption: newSSE,
8554 }
8555 args["destination"] = dst
8556
8557 _, err = c.CopyObject(context.Background(), dst, src)
8558 if err != nil {
8559 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8560 return
8561 }
8562
8563 // Get copied object and check if content is equal
8564 reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
8565 if err != nil {
8566 logError(testName, function, args, startTime, "", "GetObject failed", err)
8567 return
8568 }
8569
8570 decBytes, err = io.ReadAll(reader)
8571 if err != nil {
8572 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8573 return
8574 }
8575 if !bytes.Equal(decBytes, buf) {
8576 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8577 return
8578 }
8579 reader.Close()
8580
8581 // Test in-place decryption.
8582 dst = minio.CopyDestOptions{
8583 Bucket: bucketName,
8584 Object: "srcObject",
8585 }
8586 args["destination"] = dst
8587
8588 src = minio.CopySrcOptions{
8589 Bucket: bucketName,
8590 Object: "srcObject",
8591 Encryption: newSSE,
8592 }
8593 args["source"] = src
8594 _, err = c.CopyObject(context.Background(), dst, src)
8595 if err != nil {
8596 logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
8597 return
8598 }
8599 }
8600
8601 // Get copied decrypted object and check if content is equal
8602 reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{})
8603 if err != nil {
8604 logError(testName, function, args, startTime, "", "GetObject failed", err)
8605 return
8606 }
8607 defer reader.Close()
8608
8609 decBytes, err = io.ReadAll(reader)
8610 if err != nil {
8611 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8612 return
8613 }
8614 if !bytes.Equal(decBytes, buf) {
8615 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8616 return
8617 }
8618
8619 successLogger(testName, function, args, startTime).Info()
8620}
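
// Editorial sketch (hypothetical helper): the wrapper above only forwards
// an encryption setting when the client has headers to send. SSE-S3 is
// fully server-managed, so reads and copies of SSE-S3 objects take no
// client-side encryption argument.
func clientSideEncryption(sse encrypt.ServerSide) encrypt.ServerSide {
	if sse != nil && sse.Type() != encrypt.S3 {
		return sse // e.g. SSE-C: the key material travels in request headers
	}
	return nil // SSE-S3 (or unencrypted): nothing for the client to send
}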
8621
8622// Test encrypted copy object
8623func testUnencryptedToSSECCopyObject() {
8624 // initialize logging params
8625 startTime := time.Now()
8626 testName := getFuncName()
8627 function := "CopyObject(destination, source)"
8628 args := map[string]interface{}{}
8629
8630 // Instantiate new minio client object
8631 c, err := minio.New(os.Getenv(serverEndpoint),
8632 &minio.Options{
8633 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8634 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8635 })
8636 if err != nil {
8637 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8638 return
8639 }
8640 // Generate a new random bucket name.
8641 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8642
8643 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8644 // c.TraceOn(os.Stderr)
8645 testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst)
8646}
8647
8648// Test encrypted copy object
8649func testUnencryptedToSSES3CopyObject() {
8650 // initialize logging params
8651 startTime := time.Now()
8652 testName := getFuncName()
8653 function := "CopyObject(destination, source)"
8654 args := map[string]interface{}{}
8655
8656 // Instantiate new minio client object
8657 c, err := minio.New(os.Getenv(serverEndpoint),
8658 &minio.Options{
8659 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8660 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8661 })
8662 if err != nil {
8663 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8664 return
8665 }
8666 // Generate a new random bucket name.
8667 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8668
8669 var sseSrc encrypt.ServerSide
8670 sseDst := encrypt.NewSSE()
8671 // c.TraceOn(os.Stderr)
8672 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8673}
8674
8675// Test encrypted copy object
8676func testUnencryptedToUnencryptedCopyObject() {
8677 // initialize logging params
8678 startTime := time.Now()
8679 testName := getFuncName()
8680 function := "CopyObject(destination, source)"
8681 args := map[string]interface{}{}
8682
8683 // Instantiate new minio client object
8684 c, err := minio.New(os.Getenv(serverEndpoint),
8685 &minio.Options{
8686 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8687 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8688 })
8689 if err != nil {
8690 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8691 return
8692 }
8693 // Generate a new random bucket name.
8694 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8695
8696 var sseSrc, sseDst encrypt.ServerSide
8697 // c.TraceOn(os.Stderr)
8698 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8699}
8700
8701// Test encrypted copy object
8702func testEncryptedSSECToSSECCopyObject() {
8703 // initialize logging params
8704 startTime := time.Now()
8705 testName := getFuncName()
8706 function := "CopyObject(destination, source)"
8707 args := map[string]interface{}{}
8708
8709 // Instantiate new minio client object
8710 c, err := minio.New(os.Getenv(serverEndpoint),
8711 &minio.Options{
8712 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8713 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8714 })
8715 if err != nil {
8716 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8717 return
8718 }
8719 // Generate a new random bucket name.
8720 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8721
8722 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8723 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8724 // c.TraceOn(os.Stderr)
8725 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8726}
8727
8728// Test encrypted copy object
8729func testEncryptedSSECToSSES3CopyObject() {
8730 // initialize logging params
8731 startTime := time.Now()
8732 testName := getFuncName()
8733 function := "CopyObject(destination, source)"
8734 args := map[string]interface{}{}
8735
8736 // Instantiate new minio client object
8737 c, err := minio.New(os.Getenv(serverEndpoint),
8738 &minio.Options{
8739 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8740 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8741 })
8742 if err != nil {
8743 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8744 return
8745 }
8746 // Generate a new random bucket name.
8747 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8748
8749 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8750 sseDst := encrypt.NewSSE()
8751 // c.TraceOn(os.Stderr)
8752 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8753}
8754
8755// Test encrypted copy object
8756func testEncryptedSSECToUnencryptedCopyObject() {
8757 // initialize logging params
8758 startTime := time.Now()
8759 testName := getFuncName()
8760 function := "CopyObject(destination, source)"
8761 args := map[string]interface{}{}
8762
8763 // Instantiate new minio client object
8764 c, err := minio.New(os.Getenv(serverEndpoint),
8765 &minio.Options{
8766 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8767 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8768 })
8769 if err != nil {
8770 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8771 return
8772 }
8773 // Generate a new random bucket name.
8774 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8775
8776 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8777 var sseDst encrypt.ServerSide
8778 // c.TraceOn(os.Stderr)
8779 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8780}
8781
8782// Test encrypted copy object
8783func testEncryptedSSES3ToSSECCopyObject() {
8784 // initialize logging params
8785 startTime := time.Now()
8786 testName := getFuncName()
8787 function := "CopyObject(destination, source)"
8788 args := map[string]interface{}{}
8789
8790 // Instantiate new minio client object
8791 c, err := minio.New(os.Getenv(serverEndpoint),
8792 &minio.Options{
8793 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8794 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8795 })
8796 if err != nil {
8797 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8798 return
8799 }
8800 // Generate a new random bucket name.
8801 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8802
8803 sseSrc := encrypt.NewSSE()
8804 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8805 // c.TraceOn(os.Stderr)
8806 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8807}
8808
8809// Test encrypted copy object
8810func testEncryptedSSES3ToSSES3CopyObject() {
8811 // initialize logging params
8812 startTime := time.Now()
8813 testName := getFuncName()
8814 function := "CopyObject(destination, source)"
8815 args := map[string]interface{}{}
8816
8817 // Instantiate new minio client object
8818 c, err := minio.New(os.Getenv(serverEndpoint),
8819 &minio.Options{
8820 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8821 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8822 })
8823 if err != nil {
8824 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8825 return
8826 }
8827 // Generate a new random bucket name.
8828 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8829
8830 sseSrc := encrypt.NewSSE()
8831 sseDst := encrypt.NewSSE()
8832 // c.TraceOn(os.Stderr)
8833 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8834}
8835
8836// Test encrypted copy object
8837func testEncryptedSSES3ToUnencryptedCopyObject() {
8838 // initialize logging params
8839 startTime := time.Now()
8840 testName := getFuncName()
8841 function := "CopyObject(destination, source)"
8842 args := map[string]interface{}{}
8843
8844 // Instantiate new minio client object
8845 c, err := minio.New(os.Getenv(serverEndpoint),
8846 &minio.Options{
8847 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8848 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8849 })
8850 if err != nil {
8851 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8852 return
8853 }
8854 // Generate a new random bucket name.
8855 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8856
8857 sseSrc := encrypt.NewSSE()
8858 var sseDst encrypt.ServerSide
8859 // c.TraceOn(os.Stderr)
8860 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8861}
8862
8863// Test encrypted copy object
8864func testEncryptedCopyObjectV2() {
8865 // initialize logging params
8866 startTime := time.Now()
8867 testName := getFuncName()
8868 function := "CopyObject(destination, source)"
8869 args := map[string]interface{}{}
8870
8871 // Instantiate new minio client object
8872 c, err := minio.New(os.Getenv(serverEndpoint),
8873 &minio.Options{
8874 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8875 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8876 })
8877 if err != nil {
8878 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8879 return
8880 }
8881 // Generate a new random bucket name.
8882 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8883
8884 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8885 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8886 // c.TraceOn(os.Stderr)
8887 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8888}
8889
8890func testDecryptedCopyObject() {
8891 // initialize logging params
8892 startTime := time.Now()
8893 testName := getFuncName()
8894 function := "CopyObject(destination, source)"
8895 args := map[string]interface{}{}
8896
8897 // Instantiate new minio client object
8898 c, err := minio.New(os.Getenv(serverEndpoint),
8899 &minio.Options{
8900 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8901 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8902 })
8903 if err != nil {
8904 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8905 return
8906 }
8907
8908 bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
8909 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil {
8910 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8911 return
8912 }
8913
8914 defer cleanupBucket(bucketName, c)
8915
8916 encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
8917 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
8918 ServerSideEncryption: encryption,
8919 })
8920 if err != nil {
8921 logError(testName, function, args, startTime, "", "PutObject call failed", err)
8922 return
8923 }
8924
8925 src := minio.CopySrcOptions{
8926 Bucket: bucketName,
8927 Object: objectName,
8928 Encryption: encrypt.SSECopy(encryption),
8929 }
8930 args["source"] = src
8931
8932 dst := minio.CopyDestOptions{
8933 Bucket: bucketName,
8934 Object: "decrypted-" + objectName,
8935 }
8936 args["destination"] = dst
8937
8938 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
8939 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8940 return
8941 }
8942 if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
8943 logError(testName, function, args, startTime, "", "GetObject failed", err)
8944 return
8945 }
8946 successLogger(testName, function, args, startTime).Info()
8947}
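
// Editorial sketch (hypothetical helper): encrypt.SSECopy wraps an SSE-C
// key so that it is marshalled as x-amz-copy-source-* headers, which is
// what the source side of a server-side copy requires. Omitting Encryption
// on the destination yields a plaintext copy, as tested above.
func exampleDecryptingCopy(c *minio.Client, bucket, object string, key encrypt.ServerSide) error {
	src := minio.CopySrcOptions{Bucket: bucket, Object: object, Encryption: encrypt.SSECopy(key)}
	dst := minio.CopyDestOptions{Bucket: bucket, Object: "decrypted-" + object}
	_, err := c.CopyObject(context.Background(), dst, src)
	return err
}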
8948
8949func testSSECMultipartEncryptedToSSECCopyObjectPart() {
8950 // initialize logging params
8951 startTime := time.Now()
8952 testName := getFuncName()
8953 function := "CopyObjectPart(destination, source)"
8954 args := map[string]interface{}{}
8955
8956 // Instantiate new minio client object
8957 client, err := minio.New(os.Getenv(serverEndpoint),
8958 &minio.Options{
8959 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8960 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8961 })
8962 if err != nil {
8963 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8964 return
8965 }
8966
8967 // Instantiate new core client object.
8968 c := minio.Core{Client: client}
8969
8970 // Enable tracing, write to stderr.
8971 // c.TraceOn(os.Stderr)
8972
8973 // Set user agent.
8974 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
8975
8976 // Generate a new random bucket name.
8977 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
8978
8979 // Make a new bucket.
8980 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8981 if err != nil {
8982 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8983 return
8984 }
8985 defer cleanupBucket(bucketName, client)
8986 // Make a buffer with 6MB of data
8987 buf := bytes.Repeat([]byte("abcdef"), 1024*1024)
8988
8989 // Save the data
8990 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
8991 password := "correct horse battery staple"
8992 srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
8993
8994 // Upload a 6MB object using multipart mechanism
8995 uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption})
8996 if err != nil {
8997 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
8998 return
8999 }
9000
9001 var completeParts []minio.CompletePart
9002
9003 part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1,
9004 bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024,
9005 minio.PutObjectPartOptions{SSE: srcencryption},
9006 )
9007 if err != nil {
9008 logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
9009 return
9010 }
9011 completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
9012
9013 part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2,
9014 bytes.NewReader(buf[5*1024*1024:]), 1024*1024,
9015 minio.PutObjectPartOptions{SSE: srcencryption},
9016 )
9017 if err != nil {
9018 logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
9019 return
9020 }
9021 completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
9022
9023 // Complete the multipart upload
9024 _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{})
9025 if err != nil {
9026 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9027 return
9028 }
9029
9030 // Stat the object and check its length matches
9031 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
9032 if err != nil {
9033 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9034 return
9035 }
9036
9037 destBucketName := bucketName
9038 destObjectName := objectName + "-dest"
9039 dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
9040
9041 uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9042 if err != nil {
9043 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9044 return
9045 }
9046
9047 // Content of the destination object will be two copies of the
9048 // source object concatenated, followed by the first byte of the
9049 // source object.
9050 metadata := make(map[string]string)
9051 header := make(http.Header)
9052 encrypt.SSECopy(srcencryption).Marshal(header)
9053 dstencryption.Marshal(header)
9054 for k, v := range header {
9055 metadata[k] = v[0]
9056 }
9057
9058 metadata["x-amz-copy-source-if-match"] = objInfo.ETag
9059
9060 // First of three parts
9061 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9062 if err != nil {
9063 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9064 return
9065 }
9066
9067 // Second of three parts
9068 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9069 if err != nil {
9070 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9071 return
9072 }
9073
9074 // Last of three parts
9075 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9076 if err != nil {
9077 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9078 return
9079 }
9080
9081 // Complete the multipart upload
9082 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9083 if err != nil {
9084 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9085 return
9086 }
9087
9088 // Stat the object and check its length matches
9089 objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
9090 if err != nil {
9091 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9092 return
9093 }
9094
9095 if objInfo.Size != (6*1024*1024)*2+1 {
9096 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9097 return
9098 }
9099
9100 // Now we read the data back
9101 getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
9102 getOpts.SetRange(0, 6*1024*1024-1)
9103 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9104 if err != nil {
9105 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9106 return
9107 }
9108 getBuf := make([]byte, 6*1024*1024)
9109 _, err = readFull(r, getBuf)
9110 if err != nil {
9111 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9112 return
9113 }
9114 if !bytes.Equal(getBuf, buf) {
9115 logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err)
9116 return
9117 }
9118
9119 getOpts.SetRange(6*1024*1024, 0)
9120 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9121 if err != nil {
9122 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9123 return
9124 }
9125 getBuf = make([]byte, 6*1024*1024+1)
9126 _, err = readFull(r, getBuf)
9127 if err != nil {
9128 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9129 return
9130 }
9131 if !bytes.Equal(getBuf[:6*1024*1024], buf) {
9132 logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err)
9133 return
9134 }
9135 if getBuf[6*1024*1024] != buf[0] {
9136 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9137 return
9138 }
9139
9140 successLogger(testName, function, args, startTime).Info()
9141
9142 // No need to remove destBucketName; it is the same as bucketName.
9143}
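
// Editorial sketch (hypothetical helper): in the CopyObjectPart calls
// above, the two trailing integers are startOffset and length. A length of
// -1 copies the whole source object and (0, 1) copies just its first byte,
// which is why the destination ends up at twice the source size plus one.
func exampleCopyWholeAndOneByte(c minio.Core, srcBucket, srcObject, dstBucket, dstObject, uploadID string, meta map[string]string) ([]minio.CompletePart, error) {
	// Part 1: the whole source object (length -1).
	p1, err := c.CopyObjectPart(context.Background(), srcBucket, srcObject, dstBucket, dstObject, uploadID, 1, 0, -1, meta)
	if err != nil {
		return nil, err
	}
	// Part 2: only the first byte of the source object.
	p2, err := c.CopyObjectPart(context.Background(), srcBucket, srcObject, dstBucket, dstObject, uploadID, 2, 0, 1, meta)
	if err != nil {
		return nil, err
	}
	return []minio.CompletePart{p1, p2}, nil
}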
9144
9145// Test Core CopyObjectPart implementation
9146func testSSECEncryptedToSSECCopyObjectPart() {
9147 // initialize logging params
9148 startTime := time.Now()
9149 testName := getFuncName()
9150 function := "CopyObjectPart(destination, source)"
9151 args := map[string]interface{}{}
9152
9153 // Instantiate new minio client object
9154 client, err := minio.New(os.Getenv(serverEndpoint),
9155 &minio.Options{
9156 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9157 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9158 })
9159 if err != nil {
9160 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9161 return
9162 }
9163
9164 // Instantiate new core client object.
9165 c := minio.Core{Client: client}
9166
9167 // Enable tracing, write to stderr.
9168 // c.TraceOn(os.Stderr)
9169
9170 // Set user agent.
9171 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9172
9173 // Generate a new random bucket name.
9174 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9175
9176 // Make a new bucket.
9177 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9178 if err != nil {
9179 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9180 return
9181 }
9182 defer cleanupBucket(bucketName, client)
9183 // Make a buffer with 5MB of data
9184 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9185
9186 // Save the data
9187 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9188 password := "correct horse battery staple"
9189 srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
9190 putmetadata := map[string]string{
9191 "Content-Type": "binary/octet-stream",
9192 }
9193 opts := minio.PutObjectOptions{
9194 UserMetadata: putmetadata,
9195 ServerSideEncryption: srcencryption,
9196 }
9197 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9198 if err != nil {
9199 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9200 return
9201 }
9202
9203 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
9204 if err != nil {
9205 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9206 return
9207 }
9208
9209 if st.Size != int64(len(buf)) {
9210 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9211 return
9212 }
9213
9214 destBucketName := bucketName
9215 destObjectName := objectName + "-dest"
9216 dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
9217
9218 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9219 if err != nil {
9220 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9221 return
9222 }
9223
9224 // Content of the destination object will be two copies of the
9225 // source object concatenated, followed by the first byte of the
9226 // source object.
9227 metadata := make(map[string]string)
9228 header := make(http.Header)
9229 encrypt.SSECopy(srcencryption).Marshal(header)
9230 dstencryption.Marshal(header)
9231 for k, v := range header {
9232 metadata[k] = v[0]
9233 }
9234
9235 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9236
9237 // First of three parts
9238 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9239 if err != nil {
9240 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9241 return
9242 }
9243
9244 // Second of three parts
9245 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9246 if err != nil {
9247 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9248 return
9249 }
9250
9251 // Last of three parts
9252 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9253 if err != nil {
9254 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9255 return
9256 }
9257
9258 // Complete the multipart upload
9259 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9260 if err != nil {
9261 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9262 return
9263 }
9264
9265 // Stat the object and check its length matches
9266 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
9267 if err != nil {
9268 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9269 return
9270 }
9271
9272 if objInfo.Size != (5*1024*1024)*2+1 {
9273 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9274 return
9275 }
9276
9277 // Now we read the data back
9278 getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
9279 getOpts.SetRange(0, 5*1024*1024-1)
9280 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9281 if err != nil {
9282 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9283 return
9284 }
9285 getBuf := make([]byte, 5*1024*1024)
9286 _, err = readFull(r, getBuf)
9287 if err != nil {
9288 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9289 return
9290 }
9291 if !bytes.Equal(getBuf, buf) {
9292 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9293 return
9294 }
9295
9296 getOpts.SetRange(5*1024*1024, 0)
9297 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9298 if err != nil {
9299 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9300 return
9301 }
9302 getBuf = make([]byte, 5*1024*1024+1)
9303 _, err = readFull(r, getBuf)
9304 if err != nil {
9305 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9306 return
9307 }
9308 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
9309 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
9310 return
9311 }
9312 if getBuf[5*1024*1024] != buf[0] {
9313 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9314 return
9315 }
9316
9317 successLogger(testName, function, args, startTime).Info()
9318
9319 // No need to remove destBucketName; it is the same as bucketName.
9320}
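
// Editorial sketch (hypothetical demo): GetObjectOptions.SetRange uses
// inclusive offsets, and an end of 0 with a positive start means "from
// start through the last byte". That is how the reads above fetch the
// first full copy and then the remainder of the destination object.
func exampleRangeReads(opts *minio.GetObjectOptions, size int64) error {
	if err := opts.SetRange(0, size-1); err != nil { // bytes 0 .. size-1
		return err
	}
	return opts.SetRange(size, 0) // byte `size` through the end
}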
9321
9322// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy
9323func testSSECEncryptedToUnencryptedCopyPart() {
9324 // initialize logging params
9325 startTime := time.Now()
9326 testName := getFuncName()
9327 function := "CopyObjectPart(destination, source)"
9328 args := map[string]interface{}{}
9329
9330 // Instantiate new minio client object
9331 client, err := minio.New(os.Getenv(serverEndpoint),
9332 &minio.Options{
9333 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9334 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9335 })
9336 if err != nil {
9337 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9338 return
9339 }
9340
9341 // Instantiate new core client object.
9342 c := minio.Core{Client: client}
9343
9344 // Enable tracing, write to stderr.
9345 // c.TraceOn(os.Stderr)
9346
9347 // Set user agent.
9348 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9349
9350 // Generate a new random bucket name.
9351 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9352
9353 // Make a new bucket.
9354 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9355 if err != nil {
9356 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9357 return
9358 }
9359 defer cleanupBucket(bucketName, client)
9360 // Make a buffer with 5MB of data
9361 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9362
9363 // Save the data
9364 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9365 password := "correct horse battery staple"
9366 srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
9367
9368 opts := minio.PutObjectOptions{
9369 UserMetadata: map[string]string{
9370 "Content-Type": "binary/octet-stream",
9371 },
9372 ServerSideEncryption: srcencryption,
9373 }
9374 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9375 if err != nil {
9376 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9377 return
9378 }
9379
9380 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
9381 if err != nil {
9382 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9383 return
9384 }
9385
9386 if st.Size != int64(len(buf)) {
9387 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9388 return
9389 }
9390
9391 destBucketName := bucketName
9392 destObjectName := objectName + "-dest"
9393 var dstencryption encrypt.ServerSide
9394
9395 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9396 if err != nil {
9397 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9398 return
9399 }
9400
9401 // Content of the destination object will be two copies of the
9402 // source object concatenated, followed by the first byte of the
9403 // source object.
9404 metadata := make(map[string]string)
9405 header := make(http.Header)
9406 encrypt.SSECopy(srcencryption).Marshal(header)
9407 for k, v := range header {
9408 metadata[k] = v[0]
9409 }
9410
9411 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9412
9413 // First of three parts
9414 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9415 if err != nil {
9416 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9417 return
9418 }
9419
9420 // Second of three parts
9421 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9422 if err != nil {
9423 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9424 return
9425 }
9426
9427 // Last of three parts
9428 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9429 if err != nil {
9430 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9431 return
9432 }
9433
9434 // Complete the multipart upload
9435 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9436 if err != nil {
9437 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9438 return
9439 }
9440
9441 // Stat the object and check its length matches
9442 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
9443 if err != nil {
9444 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9445 return
9446 }
9447
9448 if objInfo.Size != (5*1024*1024)*2+1 {
9449 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9450 return
9451 }
9452
9453 // Now we read the data back
9454 getOpts := minio.GetObjectOptions{}
9455 getOpts.SetRange(0, 5*1024*1024-1)
9456 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9457 if err != nil {
9458 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9459 return
9460 }
9461 getBuf := make([]byte, 5*1024*1024)
9462 _, err = readFull(r, getBuf)
9463 if err != nil {
9464 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9465 return
9466 }
9467 if !bytes.Equal(getBuf, buf) {
9468 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9469 return
9470 }
9471
9472 getOpts.SetRange(5*1024*1024, 0)
9473 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9474 if err != nil {
9475 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9476 return
9477 }
9478 getBuf = make([]byte, 5*1024*1024+1)
9479 _, err = readFull(r, getBuf)
9480 if err != nil {
9481 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9482 return
9483 }
9484 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
9485 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
9486 return
9487 }
9488 if getBuf[5*1024*1024] != buf[0] {
9489 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9490 return
9491 }
9492
9493 successLogger(testName, function, args, startTime).Info()
9494
	// No need to remove destBucketName; it is the same as bucketName.
9496}
9497
9498// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy
9499func testSSECEncryptedToSSES3CopyObjectPart() {
9500 // initialize logging params
9501 startTime := time.Now()
9502 testName := getFuncName()
9503 function := "CopyObjectPart(destination, source)"
9504 args := map[string]interface{}{}
9505
9506 // Instantiate new minio client object
9507 client, err := minio.New(os.Getenv(serverEndpoint),
9508 &minio.Options{
9509 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9510 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9511 })
9512 if err != nil {
9513 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9514 return
9515 }
9516
9517 // Instantiate new core client object.
9518 c := minio.Core{client}
9519
9520 // Enable tracing, write to stderr.
9521 // c.TraceOn(os.Stderr)
9522
9523 // Set user agent.
9524 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9525
9526 // Generate a new random bucket name.
9527 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9528
9529 // Make a new bucket.
9530 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9531 if err != nil {
9532 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9533 return
9534 }
9535 defer cleanupBucket(bucketName, client)
9536 // Make a buffer with 5MB of data
9537 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9538
9539 // Save the data
9540 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9541 password := "correct horse battery staple"
9542 srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
9543 putmetadata := map[string]string{
9544 "Content-Type": "binary/octet-stream",
9545 }
9546 opts := minio.PutObjectOptions{
9547 UserMetadata: putmetadata,
9548 ServerSideEncryption: srcencryption,
9549 }
9550
9551 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9552 if err != nil {
9553 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9554 return
9555 }
9556
9557 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
9558 if err != nil {
9559 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9560 return
9561 }
9562
9563 if st.Size != int64(len(buf)) {
9564 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9565 return
9566 }
9567
9568 destBucketName := bucketName
9569 destObjectName := objectName + "-dest"
9570 dstencryption := encrypt.NewSSE()
9571
9572 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9573 if err != nil {
9574 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9575 return
9576 }
9577
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
9581 metadata := make(map[string]string)
9582 header := make(http.Header)
9583 encrypt.SSECopy(srcencryption).Marshal(header)
9584 dstencryption.Marshal(header)
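	// Two header sets are combined here: the SSE-C copy-source headers
	// authorize decrypting the source, while the SSE-S3 header asks the
	// server to encrypt the destination parts at rest.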
9585
9586 for k, v := range header {
9587 metadata[k] = v[0]
9588 }
9589
9590 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9591
9592 // First of three parts
9593 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9594 if err != nil {
9595 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9596 return
9597 }
9598
9599 // Second of three parts
9600 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9601 if err != nil {
9602 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9603 return
9604 }
9605
9606 // Last of three parts
9607 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9608 if err != nil {
9609 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9610 return
9611 }
9612
9613 // Complete the multipart upload
9614 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9615 if err != nil {
9616 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9617 return
9618 }
9619
9620 // Stat the object and check its length matches
9621 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
9622 if err != nil {
9623 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9624 return
9625 }
9626
9627 if objInfo.Size != (5*1024*1024)*2+1 {
9628 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9629 return
9630 }
9631
9632 // Now we read the data back
9633 getOpts := minio.GetObjectOptions{}
9634 getOpts.SetRange(0, 5*1024*1024-1)
9635 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9636 if err != nil {
9637 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9638 return
9639 }
9640 getBuf := make([]byte, 5*1024*1024)
9641 _, err = readFull(r, getBuf)
9642 if err != nil {
9643 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9644 return
9645 }
9646 if !bytes.Equal(getBuf, buf) {
9647 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9648 return
9649 }
9650
9651 getOpts.SetRange(5*1024*1024, 0)
9652 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9653 if err != nil {
9654 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9655 return
9656 }
9657 getBuf = make([]byte, 5*1024*1024+1)
9658 _, err = readFull(r, getBuf)
9659 if err != nil {
9660 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9661 return
9662 }
9663 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
9664 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
9665 return
9666 }
9667 if getBuf[5*1024*1024] != buf[0] {
9668 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9669 return
9670 }
9671
9672 successLogger(testName, function, args, startTime).Info()
9673
	// No need to remove destBucketName; it is the same as bucketName.
9675}
9676
9677// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part
9678func testUnencryptedToSSECCopyObjectPart() {
9679 // initialize logging params
9680 startTime := time.Now()
9681 testName := getFuncName()
9682 function := "CopyObjectPart(destination, source)"
9683 args := map[string]interface{}{}
9684
9685 // Instantiate new minio client object
9686 client, err := minio.New(os.Getenv(serverEndpoint),
9687 &minio.Options{
9688 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9689 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9690 })
9691 if err != nil {
9692 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9693 return
9694 }
9695
9696 // Instantiate new core client object.
9697 c := minio.Core{client}
9698
9699 // Enable tracing, write to stderr.
9700 // c.TraceOn(os.Stderr)
9701
9702 // Set user agent.
9703 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9704
9705 // Generate a new random bucket name.
9706 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9707
9708 // Make a new bucket.
9709 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9710 if err != nil {
9711 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9712 return
9713 }
9714 defer cleanupBucket(bucketName, client)
9715 // Make a buffer with 5MB of data
9716 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9717
9718 // Save the data
9719 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9720 password := "correct horse battery staple"
9721 putmetadata := map[string]string{
9722 "Content-Type": "binary/octet-stream",
9723 }
9724 opts := minio.PutObjectOptions{
9725 UserMetadata: putmetadata,
9726 }
9727 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9728 if err != nil {
9729 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9730 return
9731 }
9732
9733 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
9734 if err != nil {
9735 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9736 return
9737 }
9738
9739 if st.Size != int64(len(buf)) {
9740 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9741 return
9742 }
9743
9744 destBucketName := bucketName
9745 destObjectName := objectName + "-dest"
9746 dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
9747
9748 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9749 if err != nil {
9750 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9751 return
9752 }
9753
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
9757 metadata := make(map[string]string)
9758 header := make(http.Header)
9759 dstencryption.Marshal(header)
9760 for k, v := range header {
9761 metadata[k] = v[0]
9762 }
9763
9764 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9765
9766 // First of three parts
9767 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9768 if err != nil {
9769 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9770 return
9771 }
9772
9773 // Second of three parts
9774 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9775 if err != nil {
9776 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9777 return
9778 }
9779
9780 // Last of three parts
9781 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9782 if err != nil {
9783 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9784 return
9785 }
9786
9787 // Complete the multipart upload
9788 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9789 if err != nil {
9790 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9791 return
9792 }
9793
9794 // Stat the object and check its length matches
9795 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
9796 if err != nil {
9797 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9798 return
9799 }
9800
9801 if objInfo.Size != (5*1024*1024)*2+1 {
9802 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9803 return
9804 }
9805
9806 // Now we read the data back
9807 getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
9808 getOpts.SetRange(0, 5*1024*1024-1)
9809 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9810 if err != nil {
9811 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9812 return
9813 }
9814 getBuf := make([]byte, 5*1024*1024)
9815 _, err = readFull(r, getBuf)
9816 if err != nil {
9817 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9818 return
9819 }
9820 if !bytes.Equal(getBuf, buf) {
9821 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9822 return
9823 }
9824
9825 getOpts.SetRange(5*1024*1024, 0)
9826 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9827 if err != nil {
9828 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9829 return
9830 }
9831 getBuf = make([]byte, 5*1024*1024+1)
9832 _, err = readFull(r, getBuf)
9833 if err != nil {
9834 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9835 return
9836 }
9837 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
9838 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
9839 return
9840 }
9841 if getBuf[5*1024*1024] != buf[0] {
9842 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9843 return
9844 }
9845
9846 successLogger(testName, function, args, startTime).Info()
9847
	// No need to remove destBucketName; it is the same as bucketName.
9849}
9850
9851// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
9852func testUnencryptedToUnencryptedCopyPart() {
9853 // initialize logging params
9854 startTime := time.Now()
9855 testName := getFuncName()
9856 function := "CopyObjectPart(destination, source)"
9857 args := map[string]interface{}{}
9858
9859 // Instantiate new minio client object
9860 client, err := minio.New(os.Getenv(serverEndpoint),
9861 &minio.Options{
9862 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9863 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9864 })
9865 if err != nil {
9866 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9867 return
9868 }
9869
9870 // Instantiate new core client object.
9871 c := minio.Core{client}
9872
9873 // Enable tracing, write to stderr.
9874 // c.TraceOn(os.Stderr)
9875
9876 // Set user agent.
9877 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9878
9879 // Generate a new random bucket name.
9880 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9881
9882 // Make a new bucket.
9883 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9884 if err != nil {
9885 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9886 return
9887 }
9888 defer cleanupBucket(bucketName, client)
9889 // Make a buffer with 5MB of data
9890 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9891
9892 // Save the data
9893 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9894 putmetadata := map[string]string{
9895 "Content-Type": "binary/octet-stream",
9896 }
9897 opts := minio.PutObjectOptions{
9898 UserMetadata: putmetadata,
9899 }
9900 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9901 if err != nil {
9902 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9903 return
9904 }
9905 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
9906 if err != nil {
9907 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9908 return
9909 }
9910
9911 if st.Size != int64(len(buf)) {
9912 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9913 return
9914 }
9915
9916 destBucketName := bucketName
9917 destObjectName := objectName + "-dest"
9918
9919 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
9920 if err != nil {
9921 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9922 return
9923 }
9924
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
9928 metadata := make(map[string]string)
9929 header := make(http.Header)
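	// Neither side of this copy is encrypted, so header stays empty and
	// the loop below adds nothing; it is presumably kept for symmetry
	// with the encrypted variants of this test.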
9930 for k, v := range header {
9931 metadata[k] = v[0]
9932 }
9933
9934 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9935
9936 // First of three parts
9937 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9938 if err != nil {
9939 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9940 return
9941 }
9942
9943 // Second of three parts
9944 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9945 if err != nil {
9946 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9947 return
9948 }
9949
9950 // Last of three parts
9951 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9952 if err != nil {
9953 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9954 return
9955 }
9956
9957 // Complete the multipart upload
9958 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9959 if err != nil {
9960 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9961 return
9962 }
9963
9964 // Stat the object and check its length matches
9965 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
9966 if err != nil {
9967 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9968 return
9969 }
9970
9971 if objInfo.Size != (5*1024*1024)*2+1 {
9972 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9973 return
9974 }
9975
9976 // Now we read the data back
9977 getOpts := minio.GetObjectOptions{}
9978 getOpts.SetRange(0, 5*1024*1024-1)
9979 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9980 if err != nil {
9981 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9982 return
9983 }
9984 getBuf := make([]byte, 5*1024*1024)
9985 _, err = readFull(r, getBuf)
9986 if err != nil {
9987 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9988 return
9989 }
9990 if !bytes.Equal(getBuf, buf) {
9991 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9992 return
9993 }
9994
9995 getOpts.SetRange(5*1024*1024, 0)
9996 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9997 if err != nil {
9998 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9999 return
10000 }
10001 getBuf = make([]byte, 5*1024*1024+1)
10002 _, err = readFull(r, getBuf)
10003 if err != nil {
10004 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10005 return
10006 }
10007 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10008 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10009 return
10010 }
10011 if getBuf[5*1024*1024] != buf[0] {
10012 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10013 return
10014 }
10015
10016 successLogger(testName, function, args, startTime).Info()
10017
	// No need to remove destBucketName; it is the same as bucketName.
10019}
10020
10021// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy
10022func testUnencryptedToSSES3CopyObjectPart() {
10023 // initialize logging params
10024 startTime := time.Now()
10025 testName := getFuncName()
10026 function := "CopyObjectPart(destination, source)"
10027 args := map[string]interface{}{}
10028
10029 // Instantiate new minio client object
10030 client, err := minio.New(os.Getenv(serverEndpoint),
10031 &minio.Options{
10032 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10033 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10034 })
10035 if err != nil {
10036 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10037 return
10038 }
10039
10040 // Instantiate new core client object.
10041 c := minio.Core{client}
10042
10043 // Enable tracing, write to stderr.
10044 // c.TraceOn(os.Stderr)
10045
10046 // Set user agent.
10047 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10048
10049 // Generate a new random bucket name.
10050 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10051
10052 // Make a new bucket.
10053 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10054 if err != nil {
10055 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10056 return
10057 }
10058 defer cleanupBucket(bucketName, client)
10059 // Make a buffer with 5MB of data
10060 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10061
10062 // Save the data
10063 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10064 opts := minio.PutObjectOptions{
10065 UserMetadata: map[string]string{
10066 "Content-Type": "binary/octet-stream",
10067 },
10068 }
10069 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10070 if err != nil {
10071 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10072 return
10073 }
10074 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
10075 if err != nil {
10076 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10077 return
10078 }
10079
10080 if st.Size != int64(len(buf)) {
10081 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10082 return
10083 }
10084
10085 destBucketName := bucketName
10086 destObjectName := objectName + "-dest"
10087 dstencryption := encrypt.NewSSE()
10088
10089 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
10090 if err != nil {
10091 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10092 return
10093 }
10094
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
10098 metadata := make(map[string]string)
10099 header := make(http.Header)
10100 dstencryption.Marshal(header)
10101
10102 for k, v := range header {
10103 metadata[k] = v[0]
10104 }
10105
10106 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10107
10108 // First of three parts
10109 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10110 if err != nil {
10111 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10112 return
10113 }
10114
10115 // Second of three parts
10116 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10117 if err != nil {
10118 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10119 return
10120 }
10121
10122 // Last of three parts
10123 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10124 if err != nil {
10125 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10126 return
10127 }
10128
10129 // Complete the multipart upload
10130 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10131 if err != nil {
10132 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10133 return
10134 }
10135
10136 // Stat the object and check its length matches
10137 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
10138 if err != nil {
10139 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10140 return
10141 }
10142
10143 if objInfo.Size != (5*1024*1024)*2+1 {
10144 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10145 return
10146 }
10147
10148 // Now we read the data back
10149 getOpts := minio.GetObjectOptions{}
10150 getOpts.SetRange(0, 5*1024*1024-1)
10151 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10152 if err != nil {
10153 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10154 return
10155 }
10156 getBuf := make([]byte, 5*1024*1024)
10157 _, err = readFull(r, getBuf)
10158 if err != nil {
10159 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10160 return
10161 }
10162 if !bytes.Equal(getBuf, buf) {
10163 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10164 return
10165 }
10166
10167 getOpts.SetRange(5*1024*1024, 0)
10168 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10169 if err != nil {
10170 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10171 return
10172 }
10173 getBuf = make([]byte, 5*1024*1024+1)
10174 _, err = readFull(r, getBuf)
10175 if err != nil {
10176 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10177 return
10178 }
10179 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10180 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10181 return
10182 }
10183 if getBuf[5*1024*1024] != buf[0] {
10184 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10185 return
10186 }
10187
10188 successLogger(testName, function, args, startTime).Info()
10189
	// No need to remove destBucketName; it is the same as bucketName.
10191}
10192
10193// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part
10194func testSSES3EncryptedToSSECCopyObjectPart() {
10195 // initialize logging params
10196 startTime := time.Now()
10197 testName := getFuncName()
10198 function := "CopyObjectPart(destination, source)"
10199 args := map[string]interface{}{}
10200
10201 // Instantiate new minio client object
10202 client, err := minio.New(os.Getenv(serverEndpoint),
10203 &minio.Options{
10204 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10205 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10206 })
10207 if err != nil {
10208 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10209 return
10210 }
10211
10212 // Instantiate new core client object.
10213 c := minio.Core{client}
10214
10215 // Enable tracing, write to stderr.
10216 // c.TraceOn(os.Stderr)
10217
10218 // Set user agent.
10219 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10220
10221 // Generate a new random bucket name.
10222 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10223
10224 // Make a new bucket.
10225 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10226 if err != nil {
10227 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10228 return
10229 }
10230 defer cleanupBucket(bucketName, client)
10231 // Make a buffer with 5MB of data
10232 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10233
10234 // Save the data
10235 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10236 password := "correct horse battery staple"
10237 srcEncryption := encrypt.NewSSE()
10238 opts := minio.PutObjectOptions{
10239 UserMetadata: map[string]string{
10240 "Content-Type": "binary/octet-stream",
10241 },
10242 ServerSideEncryption: srcEncryption,
10243 }
10244 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10245 if err != nil {
10246 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10247 return
10248 }
10249
10250 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
10251 if err != nil {
10252 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10253 return
10254 }
10255
10256 if st.Size != int64(len(buf)) {
10257 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10258 return
10259 }
10260
10261 destBucketName := bucketName
10262 destObjectName := objectName + "-dest"
10263 dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
10264
10265 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
10266 if err != nil {
10267 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10268 return
10269 }
10270
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
10274 metadata := make(map[string]string)
10275 header := make(http.Header)
10276 dstencryption.Marshal(header)
10277 for k, v := range header {
10278 metadata[k] = v[0]
10279 }
10280
10281 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10282
10283 // First of three parts
10284 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10285 if err != nil {
10286 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10287 return
10288 }
10289
10290 // Second of three parts
10291 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10292 if err != nil {
10293 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10294 return
10295 }
10296
10297 // Last of three parts
10298 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10299 if err != nil {
10300 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10301 return
10302 }
10303
10304 // Complete the multipart upload
10305 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10306 if err != nil {
10307 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10308 return
10309 }
10310
10311 // Stat the object and check its length matches
10312 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
10313 if err != nil {
10314 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10315 return
10316 }
10317
10318 if objInfo.Size != (5*1024*1024)*2+1 {
10319 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10320 return
10321 }
10322
10323 // Now we read the data back
10324 getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
10325 getOpts.SetRange(0, 5*1024*1024-1)
10326 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10327 if err != nil {
10328 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10329 return
10330 }
10331 getBuf := make([]byte, 5*1024*1024)
10332 _, err = readFull(r, getBuf)
10333 if err != nil {
10334 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10335 return
10336 }
10337 if !bytes.Equal(getBuf, buf) {
10338 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10339 return
10340 }
10341
10342 getOpts.SetRange(5*1024*1024, 0)
10343 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10344 if err != nil {
10345 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10346 return
10347 }
10348 getBuf = make([]byte, 5*1024*1024+1)
10349 _, err = readFull(r, getBuf)
10350 if err != nil {
10351 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10352 return
10353 }
10354 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10355 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10356 return
10357 }
10358 if getBuf[5*1024*1024] != buf[0] {
10359 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10360 return
10361 }
10362
10363 successLogger(testName, function, args, startTime).Info()
10364
	// No need to remove destBucketName; it is the same as bucketName.
10366}
10367
// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy part
10369func testSSES3EncryptedToUnencryptedCopyPart() {
10370 // initialize logging params
10371 startTime := time.Now()
10372 testName := getFuncName()
10373 function := "CopyObjectPart(destination, source)"
10374 args := map[string]interface{}{}
10375
10376 // Instantiate new minio client object
10377 client, err := minio.New(os.Getenv(serverEndpoint),
10378 &minio.Options{
10379 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10380 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10381 })
10382 if err != nil {
10383 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10384 return
10385 }
10386
10387 // Instantiate new core client object.
10388 c := minio.Core{client}
10389
10390 // Enable tracing, write to stderr.
10391 // c.TraceOn(os.Stderr)
10392
10393 // Set user agent.
10394 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10395
10396 // Generate a new random bucket name.
10397 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10398
10399 // Make a new bucket.
10400 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10401 if err != nil {
10402 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10403 return
10404 }
10405 defer cleanupBucket(bucketName, client)
10406 // Make a buffer with 5MB of data
10407 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10408
10409 // Save the data
10410 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10411 srcEncryption := encrypt.NewSSE()
10412 opts := minio.PutObjectOptions{
10413 UserMetadata: map[string]string{
10414 "Content-Type": "binary/octet-stream",
10415 },
10416 ServerSideEncryption: srcEncryption,
10417 }
10418 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10419 if err != nil {
10420 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10421 return
10422 }
10423 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
10424 if err != nil {
10425 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10426 return
10427 }
10428
10429 if st.Size != int64(len(buf)) {
10430 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10431 return
10432 }
10433
10434 destBucketName := bucketName
10435 destObjectName := objectName + "-dest"
10436
10437 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
10438 if err != nil {
10439 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10440 return
10441 }
10442
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
10446 metadata := make(map[string]string)
10447 header := make(http.Header)
10448 for k, v := range header {
10449 metadata[k] = v[0]
10450 }
10451
10452 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10453
10454 // First of three parts
10455 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10456 if err != nil {
10457 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10458 return
10459 }
10460
10461 // Second of three parts
10462 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10463 if err != nil {
10464 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10465 return
10466 }
10467
10468 // Last of three parts
10469 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10470 if err != nil {
10471 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10472 return
10473 }
10474
10475 // Complete the multipart upload
10476 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10477 if err != nil {
10478 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10479 return
10480 }
10481
10482 // Stat the object and check its length matches
10483 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
10484 if err != nil {
10485 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10486 return
10487 }
10488
10489 if objInfo.Size != (5*1024*1024)*2+1 {
10490 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10491 return
10492 }
10493
10494 // Now we read the data back
10495 getOpts := minio.GetObjectOptions{}
10496 getOpts.SetRange(0, 5*1024*1024-1)
10497 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10498 if err != nil {
10499 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10500 return
10501 }
10502 getBuf := make([]byte, 5*1024*1024)
10503 _, err = readFull(r, getBuf)
10504 if err != nil {
10505 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10506 return
10507 }
10508 if !bytes.Equal(getBuf, buf) {
10509 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10510 return
10511 }
10512
10513 getOpts.SetRange(5*1024*1024, 0)
10514 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10515 if err != nil {
10516 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10517 return
10518 }
10519 getBuf = make([]byte, 5*1024*1024+1)
10520 _, err = readFull(r, getBuf)
10521 if err != nil {
10522 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10523 return
10524 }
10525 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10526 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10527 return
10528 }
10529 if getBuf[5*1024*1024] != buf[0] {
10530 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10531 return
10532 }
10533
10534 successLogger(testName, function, args, startTime).Info()
10535
	// No need to remove destBucketName; it is the same as bucketName.
10537}
10538
// Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy part
10540func testSSES3EncryptedToSSES3CopyObjectPart() {
10541 // initialize logging params
10542 startTime := time.Now()
10543 testName := getFuncName()
10544 function := "CopyObjectPart(destination, source)"
10545 args := map[string]interface{}{}
10546
10547 // Instantiate new minio client object
10548 client, err := minio.New(os.Getenv(serverEndpoint),
10549 &minio.Options{
10550 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10551 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10552 })
10553 if err != nil {
10554 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10555 return
10556 }
10557
10558 // Instantiate new core client object.
10559 c := minio.Core{client}
10560
10561 // Enable tracing, write to stderr.
10562 // c.TraceOn(os.Stderr)
10563
10564 // Set user agent.
10565 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10566
10567 // Generate a new random bucket name.
10568 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10569
10570 // Make a new bucket.
10571 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10572 if err != nil {
10573 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10574 return
10575 }
10576 defer cleanupBucket(bucketName, client)
10577 // Make a buffer with 5MB of data
10578 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10579
10580 // Save the data
10581 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10582 srcEncryption := encrypt.NewSSE()
10583 opts := minio.PutObjectOptions{
10584 UserMetadata: map[string]string{
10585 "Content-Type": "binary/octet-stream",
10586 },
10587 ServerSideEncryption: srcEncryption,
10588 }
10589
10590 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10591 if err != nil {
10592 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10593 return
10594 }
10595 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
10596 if err != nil {
10597 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10598 return
10599 }
10600 if st.Size != int64(len(buf)) {
10601 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10602 return
10603 }
10604
10605 destBucketName := bucketName
10606 destObjectName := objectName + "-dest"
10607 dstencryption := encrypt.NewSSE()
10608
10609 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
10610 if err != nil {
10611 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10612 return
10613 }
10614
	// Content of the destination object will be two copies of the
	// source object's data concatenated, followed by the first byte of
	// the source object's data.
10618 metadata := make(map[string]string)
10619 header := make(http.Header)
10620 dstencryption.Marshal(header)
10621
10622 for k, v := range header {
10623 metadata[k] = v[0]
10624 }
10625
10626 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10627
10628 // First of three parts
10629 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10630 if err != nil {
10631 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10632 return
10633 }
10634
10635 // Second of three parts
10636 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10637 if err != nil {
10638 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10639 return
10640 }
10641
10642 // Last of three parts
10643 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10644 if err != nil {
10645 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10646 return
10647 }
10648
10649 // Complete the multipart upload
10650 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10651 if err != nil {
10652 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10653 return
10654 }
10655
10656 // Stat the object and check its length matches
10657 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
10658 if err != nil {
10659 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10660 return
10661 }
10662
10663 if objInfo.Size != (5*1024*1024)*2+1 {
10664 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10665 return
10666 }
10667
10668 // Now we read the data back
10669 getOpts := minio.GetObjectOptions{}
10670 getOpts.SetRange(0, 5*1024*1024-1)
10671 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10672 if err != nil {
10673 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10674 return
10675 }
10676 getBuf := make([]byte, 5*1024*1024)
10677 _, err = readFull(r, getBuf)
10678 if err != nil {
10679 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10680 return
10681 }
10682 if !bytes.Equal(getBuf, buf) {
10683 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10684 return
10685 }
10686
10687 getOpts.SetRange(5*1024*1024, 0)
10688 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10689 if err != nil {
10690 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10691 return
10692 }
10693 getBuf = make([]byte, 5*1024*1024+1)
10694 _, err = readFull(r, getBuf)
10695 if err != nil {
10696 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10697 return
10698 }
10699 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10700 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10701 return
10702 }
10703 if getBuf[5*1024*1024] != buf[0] {
10704 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10705 return
10706 }
10707
10708 successLogger(testName, function, args, startTime).Info()
10709
	// No need to remove destBucketName; it is the same as bucketName.
10711}
10712
10713func testUserMetadataCopying() {
10714 // initialize logging params
10715 startTime := time.Now()
10716 testName := getFuncName()
10717 function := "CopyObject(destination, source)"
10718 args := map[string]interface{}{}
10719
10720 // Instantiate new minio client object
10721 c, err := minio.New(os.Getenv(serverEndpoint),
10722 &minio.Options{
10723 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10724 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10725 })
10726 if err != nil {
10727 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
10728 return
10729 }
10730
10731 // c.TraceOn(os.Stderr)
10732 testUserMetadataCopyingWrapper(c)
10733}
10734
10735func testUserMetadataCopyingWrapper(c *minio.Client) {
10736 // initialize logging params
10737 startTime := time.Now()
10738 testName := getFuncName()
10739 function := "CopyObject(destination, source)"
10740 args := map[string]interface{}{}
10741
10742 // Generate a new random bucket name.
10743 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
10744 // Make a new bucket in 'us-east-1' (source bucket).
10745 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10746 if err != nil {
10747 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10748 return
10749 }
10750
10751 defer cleanupBucket(bucketName, c)
10752
10753 fetchMeta := func(object string) (h http.Header) {
10754 objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
10755 if err != nil {
10756 logError(testName, function, args, startTime, "", "Stat failed", err)
10757 return
10758 }
10759 h = make(http.Header)
10760 for k, vs := range objInfo.Metadata {
10761 if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
10762 h.Add(k, vs[0])
10763 }
10764 }
10765 return h
10766 }
10767
	// 1. Create the source object by uploading it with user metadata set.
10769 const srcSize = 1024 * 1024
10770 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
10771 metadata := make(http.Header)
10772 metadata.Set("x-amz-meta-myheader", "myvalue")
10773 m := make(map[string]string)
10774 m["x-amz-meta-myheader"] = "myvalue"
10775 _, err = c.PutObject(context.Background(), bucketName, "srcObject",
10776 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
10777 if err != nil {
10778 logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
10779 return
10780 }
10781 if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
10782 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10783 return
10784 }
10785
10786 // 2. create source
10787 src := minio.CopySrcOptions{
10788 Bucket: bucketName,
10789 Object: "srcObject",
10790 }
10791
10792 // 2.1 create destination with metadata set
10793 dst1 := minio.CopyDestOptions{
10794 Bucket: bucketName,
10795 Object: "dstObject-1",
10796 UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
10797 ReplaceMetadata: true,
10798 }
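	// ReplaceMetadata switches the copy's metadata directive from COPY
	// to REPLACE, so the destination receives only the metadata given
	// here instead of inheriting the source's x-amz-meta-* headers.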
10799
10800 // 3. Check that copying to an object with metadata set resets
10801 // the headers on the copy.
10802 args["source"] = src
10803 args["destination"] = dst1
10804 _, err = c.CopyObject(context.Background(), dst1, src)
10805 if err != nil {
10806 logError(testName, function, args, startTime, "", "CopyObject failed", err)
10807 return
10808 }
10809
10810 expectedHeaders := make(http.Header)
10811 expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
10812 if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
10813 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10814 return
10815 }
10816
10817 // 4. create destination with no metadata set and same source
10818 dst2 := minio.CopyDestOptions{
10819 Bucket: bucketName,
10820 Object: "dstObject-2",
10821 }
10822
10823 // 5. Check that copying to an object with no metadata set,
10824 // copies metadata.
10825 args["source"] = src
10826 args["destination"] = dst2
10827 _, err = c.CopyObject(context.Background(), dst2, src)
10828 if err != nil {
10829 logError(testName, function, args, startTime, "", "CopyObject failed", err)
10830 return
10831 }
10832
10833 expectedHeaders = metadata
10834 if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
10835 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10836 return
10837 }
10838
10839 // 6. Compose a pair of sources.
10840 dst3 := minio.CopyDestOptions{
10841 Bucket: bucketName,
10842 Object: "dstObject-3",
10843 ReplaceMetadata: true,
10844 }
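	// ComposeObject concatenates the sources server-side via multipart
	// copy. With ReplaceMetadata set and no UserMetadata supplied, the
	// result is expected to carry no x-amz-meta-* headers at all.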
10845
10846 function = "ComposeObject(destination, sources)"
10847 args["source"] = []minio.CopySrcOptions{src, src}
10848 args["destination"] = dst3
10849 _, err = c.ComposeObject(context.Background(), dst3, src, src)
10850 if err != nil {
10851 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
10852 return
10853 }
10854
10855 // Check that no headers are copied in this case
10856 if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
10857 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10858 return
10859 }
10860
10861 // 7. Compose a pair of sources with dest user metadata set.
10862 dst4 := minio.CopyDestOptions{
10863 Bucket: bucketName,
10864 Object: "dstObject-4",
10865 UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
10866 ReplaceMetadata: true,
10867 }
10868
10869 function = "ComposeObject(destination, sources)"
10870 args["source"] = []minio.CopySrcOptions{src, src}
10871 args["destination"] = dst4
10872 _, err = c.ComposeObject(context.Background(), dst4, src, src)
10873 if err != nil {
10874 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
10875 return
10876 }
10877
	// Check that only the user metadata set on the destination is present
10879 expectedHeaders = make(http.Header)
10880 expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
10881 if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
10882 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10883 return
10884 }
10885
10886 successLogger(testName, function, args, startTime).Info()
10887}
10888
10889func testUserMetadataCopyingV2() {
10890 // initialize logging params
10891 startTime := time.Now()
10892 testName := getFuncName()
10893 function := "CopyObject(destination, source)"
10894 args := map[string]interface{}{}
10895
10896 // Instantiate new minio client object
10897 c, err := minio.New(os.Getenv(serverEndpoint),
10898 &minio.Options{
10899 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10900 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10901 })
10902 if err != nil {
10903 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
10904 return
10905 }
10906
10907 // c.TraceOn(os.Stderr)
10908 testUserMetadataCopyingWrapper(c)
10909}
10910
10911func testStorageClassMetadataPutObject() {
10912 // initialize logging params
10913 startTime := time.Now()
10914 function := "testStorageClassMetadataPutObject()"
10915 args := map[string]interface{}{}
10916 testName := getFuncName()
10917
10918 // Instantiate new minio client object
10919 c, err := minio.New(os.Getenv(serverEndpoint),
10920 &minio.Options{
10921 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10922 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10923 })
10924 if err != nil {
10925 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10926 return
10927 }
10928
10929 // Generate a new random bucket name.
10930 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10931 // Make a new bucket in 'us-east-1' (source bucket).
10932 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10933 if err != nil {
10934 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10935 return
10936 }
10937
10938 defer cleanupBucket(bucketName, c)
10939
10940 fetchMeta := func(object string) (h http.Header) {
10941 objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
10942 if err != nil {
10943 logError(testName, function, args, startTime, "", "Stat failed", err)
10944 return
10945 }
10946 h = make(http.Header)
10947 for k, vs := range objInfo.Metadata {
10948 if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
10949 for _, v := range vs {
10950 h.Add(k, v)
10951 }
10952 }
10953 }
10954 return h
10955 }
10956
10957 metadata := make(http.Header)
10958 metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
10959
10960 emptyMetadata := make(http.Header)
10961
10962 const srcSize = 1024 * 1024
10963 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a 5 MiB buffer (5 bytes repeated srcSize times)
10964
10965 _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
10966 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
10967 if err != nil {
10968 logError(testName, function, args, startTime, "", "PutObject failed", err)
10969 return
10970 }
10971
10972 // Get the returned metadata
10973 returnedMeta := fetchMeta("srcObjectRRSClass")
10974
10975 // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
10976 if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
10977 logError(testName, function, args, startTime, "", "Metadata match failed", err)
10978 return
10979 }
10980
10981 metadata = make(http.Header)
10982 metadata.Set("x-amz-storage-class", "STANDARD")
10983
10984 _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
10985 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
10986 if err != nil {
10987 logError(testName, function, args, startTime, "", "PutObject failed", err)
10988 return
10989 }
10990 if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
10991 logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
10992 return
10993 }
10994
10995 successLogger(testName, function, args, startTime).Info()
10996}
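// A minimal sketch of uploading with an explicit storage class, as the test
// above does; whether REDUCED_REDUNDANCY is honored, mapped to STANDARD, or
// rejected depends on the backend. Names and payload are illustrative.
func sketchPutWithStorageClass(c *minio.Client, bucket string) error {
	data := []byte("example payload")
	_, err := c.PutObject(context.Background(), bucket, "example-object",
		bytes.NewReader(data), int64(len(data)),
		minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
	return err
}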
10997
10998func testStorageClassInvalidMetadataPutObject() {
10999 // initialize logging params
11000 startTime := time.Now()
11001 function := "testStorageClassInvalidMetadataPutObject()"
11002 args := map[string]interface{}{}
11003 testName := getFuncName()
11004
11005 // Instantiate new minio client object
11006 c, err := minio.New(os.Getenv(serverEndpoint),
11007 &minio.Options{
11008 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11009 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11010 })
11011 if err != nil {
11012 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
11013 return
11014 }
11015
11016 // Generate a new random bucket name.
11017 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
11018 // Make a new bucket in 'us-east-1' (source bucket).
11019 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11020 if err != nil {
11021 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11022 return
11023 }
11024
11025 defer cleanupBucket(bucketName, c)
11026
11027 const srcSize = 1024 * 1024
11028 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a 5 MiB buffer (5 bytes repeated srcSize times)
11029
11030 _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
11031 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
11032 if err == nil {
11033 logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
11034 return
11035 }
11036
11037 successLogger(testName, function, args, startTime).Info()
11038}
11039
11040func testStorageClassMetadataCopyObject() {
11041 // initialize logging params
11042 startTime := time.Now()
11043 function := "testStorageClassMetadataCopyObject()"
11044 args := map[string]interface{}{}
11045 testName := getFuncName()
11046
11047 // Instantiate new minio client object
11048 c, err := minio.New(os.Getenv(serverEndpoint),
11049 &minio.Options{
11050 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11051 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11052 })
11053 if err != nil {
11054 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
11055 return
11056 }
11057
11058 // Generate a new random bucket name.
11059 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
11060 // Make a new bucket in 'us-east-1' (source bucket).
11061 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11062 if err != nil {
11063 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11064 return
11065 }
11066
11067 defer cleanupBucket(bucketName, c)
11068
11069 fetchMeta := func(object string) (h http.Header) {
11070 objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
11071 args["bucket"] = bucketName
11072 args["object"] = object
11073 if err != nil {
11074 logError(testName, function, args, startTime, "", "Stat failed", err)
11075 return
11076 }
11077 h = make(http.Header)
11078 for k, vs := range objInfo.Metadata {
11079 if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
11080 for _, v := range vs {
11081 h.Add(k, v)
11082 }
11083 }
11084 }
11085 return h
11086 }
11087
11088 metadata := make(http.Header)
11089 metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
11090
11091 emptyMetadata := make(http.Header)
11092
11093 const srcSize = 1024 * 1024
11094 buf := bytes.Repeat([]byte("abcde"), srcSize)
11095
11096 // Put an object with RRS Storage class
11097 _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
11098 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
11099 if err != nil {
11100 logError(testName, function, args, startTime, "", "PutObject failed", err)
11101 return
11102 }
11103
11104 // Make server side copy of object uploaded in previous step
11105 src := minio.CopySrcOptions{
11106 Bucket: bucketName,
11107 Object: "srcObjectRRSClass",
11108 }
11109 dst := minio.CopyDestOptions{
11110 Bucket: bucketName,
11111 Object: "srcObjectRRSClassCopy",
11112 }
11113 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
11114 logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
11115 return
11116 }
11117
11118 // Get the returned metadata
11119 returnedMeta := fetchMeta("srcObjectRRSClassCopy")
11120
11121 // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
11122 if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
11123 logError(testName, function, args, startTime, "", "Metadata match failed", err)
11124 return
11125 }
11126
11127 metadata = make(http.Header)
11128 metadata.Set("x-amz-storage-class", "STANDARD")
11129
11130 // Put an object with Standard Storage class
11131 _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
11132 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
11133 if err != nil {
11134 logError(testName, function, args, startTime, "", "PutObject failed", err)
11135 return
11136 }
11137
11138 // Make server side copy of object uploaded in previous step
11139 src = minio.CopySrcOptions{
11140 Bucket: bucketName,
11141 Object: "srcObjectSSClass",
11142 }
11143 dst = minio.CopyDestOptions{
11144 Bucket: bucketName,
11145 Object: "srcObjectSSClassCopy",
11146 }
11147 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
11148 logError(testName, function, args, startTime, "", "CopyObject failed on SS", err)
11149 return
11150 }
11151 // Fetch the meta data of copied object
11152 if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
11153 logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
11154 return
11155 }
11156
11157 successLogger(testName, function, args, startTime).Info()
11158}
11159
11160 // Test put object with size -1 (unknown length).
11161func testPutObjectNoLengthV2() {
11162 // initialize logging params
11163 startTime := time.Now()
11164 testName := getFuncName()
11165 function := "PutObject(bucketName, objectName, reader, size, opts)"
11166 args := map[string]interface{}{
11167 "bucketName": "",
11168 "objectName": "",
11169 "size": -1,
11170 "opts": "",
11171 }
11172
11173 // Seed random based on current time.
11174 rand.Seed(time.Now().Unix())
11175
11176 // Instantiate new minio client object.
11177 c, err := minio.New(os.Getenv(serverEndpoint),
11178 &minio.Options{
11179 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11180 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11181 })
11182 if err != nil {
11183 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11184 return
11185 }
11186
11187 // Enable tracing, write to stderr.
11188 // c.TraceOn(os.Stderr)
11189
11190 // Set user agent.
11191 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11192
11193 // Generate a new random bucket name.
11194 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11195 args["bucketName"] = bucketName
11196
11197 // Make a new bucket.
11198 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11199 if err != nil {
11200 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11201 return
11202 }
11203
11204 defer cleanupBucket(bucketName, c)
11205
11206 objectName := bucketName + "unique"
11207 args["objectName"] = objectName
11208
11209 bufSize := dataFileMap["datafile-129-MB"]
11210 reader := getDataReader("datafile-129-MB")
11211 defer reader.Close()
11212 args["size"] = bufSize
11213
11214 // Upload an object.
11215 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
11216 if err != nil {
11217 logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
11218 return
11219 }
11220
11221 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11222 if err != nil {
11223 logError(testName, function, args, startTime, "", "StatObject failed", err)
11224 return
11225 }
11226
11227 if st.Size != int64(bufSize) {
11228 logError(testName, function, args, startTime, "", "Expected upload object size "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err)
11229 return
11230 }
11231
11232 successLogger(testName, function, args, startTime).Info()
11233}
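// A minimal sketch of the size == -1 upload path tested above: an unknown
// length makes the client stream the reader via multipart upload. The reader
// and object name here are illustrative.
func sketchPutUnknownLength(c *minio.Client, bucket string, r io.Reader) (minio.UploadInfo, error) {
	return c.PutObject(context.Background(), bucket, "example-stream", r, -1,
		minio.PutObjectOptions{})
}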
11234
11235// Test put objects of unknown size.
11236func testPutObjectsUnknownV2() {
11237 // initialize logging params
11238 startTime := time.Now()
11239 testName := getFuncName()
11240 function := "PutObject(bucketName, objectName, reader,size,opts)"
11241 args := map[string]interface{}{
11242 "bucketName": "",
11243 "objectName": "",
11244 "size": "",
11245 "opts": "",
11246 }
11247
11248 // Seed random based on current time.
11249 rand.Seed(time.Now().Unix())
11250
11251 // Instantiate new minio client object.
11252 c, err := minio.New(os.Getenv(serverEndpoint),
11253 &minio.Options{
11254 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11255 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11256 })
11257 if err != nil {
11258 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11259 return
11260 }
11261
11262 // Enable tracing, write to stderr.
11263 // c.TraceOn(os.Stderr)
11264
11265 // Set user agent.
11266 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11267
11268 // Generate a new random bucket name.
11269 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11270 args["bucketName"] = bucketName
11271
11272 // Make a new bucket.
11273 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11274 if err != nil {
11275 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11276 return
11277 }
11278
11279 defer cleanupBucket(bucketName, c)
11280
11281 // Uploading multiple files of unknown size sequentially has revealed
11282 // issues in the past (on machines with 4 GB of RAM)
11283 for i := 1; i <= 4; i++ {
11284 // Simulate that we could be receiving byte slices of data that we want
11285 // to upload as a file
11286 rpipe, wpipe := io.Pipe()
11287 defer rpipe.Close()
11288 go func() {
11289 b := []byte("test")
11290 wpipe.Write(b)
11291 wpipe.Close()
11292 }()
11293
11294 // Upload the object.
11295 objectName := fmt.Sprintf("%sunique%d", bucketName, i)
11296 args["objectName"] = objectName
11297
11298 ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
11299 if err != nil {
11300 logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
11301 return
11302 }
11303
11304 if ui.Size != 4 {
11305 logError(testName, function, args, startTime, "", "Expected upload object size 4 got "+fmt.Sprintf("%d", ui.Size), nil)
11306 return
11307 }
11308
11309 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11310 if err != nil {
11311 logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
11312 return
11313 }
11314
11315 if st.Size != int64(4) {
11316 logError(testName, function, args, startTime, "", "Expected upload object size 4 got "+fmt.Sprintf("%d", st.Size), err)
11317 return
11318 }
11319
11320 }
11321
11322 successLogger(testName, function, args, startTime).Info()
11323}
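// A minimal sketch of the io.Pipe pattern used in the loop above: a producer
// goroutine writes into the pipe while PutObject consumes the read end with
// an unknown size. The payload is illustrative; writer errors are elided.
func sketchPipeUpload(c *minio.Client, bucket string) (minio.UploadInfo, error) {
	rpipe, wpipe := io.Pipe()
	go func() {
		wpipe.Write([]byte("test"))
		wpipe.Close()
	}()
	return c.PutObject(context.Background(), bucket, "example-pipe", rpipe, -1,
		minio.PutObjectOptions{})
}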
11324
11325 // Test put object with a 0-byte object.
11326func testPutObject0ByteV2() {
11327 // initialize logging params
11328 startTime := time.Now()
11329 testName := getFuncName()
11330 function := "PutObject(bucketName, objectName, reader, size, opts)"
11331 args := map[string]interface{}{
11332 "bucketName": "",
11333 "objectName": "",
11334 "size": 0,
11335 "opts": "",
11336 }
11337
11338 // Seed random based on current time.
11339 rand.Seed(time.Now().Unix())
11340
11341 // Instantiate new minio client object.
11342 c, err := minio.New(os.Getenv(serverEndpoint),
11343 &minio.Options{
11344 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11345 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11346 })
11347 if err != nil {
11348 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11349 return
11350 }
11351
11352 // Enable tracing, write to stderr.
11353 // c.TraceOn(os.Stderr)
11354
11355 // Set user agent.
11356 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11357
11358 // Generate a new random bucket name.
11359 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11360 args["bucketName"] = bucketName
11361
11362 // Make a new bucket.
11363 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11364 if err != nil {
11365 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11366 return
11367 }
11368
11369 defer cleanupBucket(bucketName, c)
11370
11371 objectName := bucketName + "unique"
11372 args["objectName"] = objectName
11373 args["opts"] = minio.PutObjectOptions{}
11374
11375 // Upload an object.
11376 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
11377 if err != nil {
11378 logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
11379 return
11380 }
11381 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11382 if err != nil {
11383 logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
11384 return
11385 }
11386 if st.Size != 0 {
11387 logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+fmt.Sprintf("%d", st.Size), err)
11388 return
11389 }
11390
11391 successLogger(testName, function, args, startTime).Info()
11392}
11393
11394// Test expected error cases
11395func testComposeObjectErrorCases() {
11396 // initialize logging params
11397 startTime := time.Now()
11398 testName := getFuncName()
11399 function := "ComposeObject(destination, sourceList)"
11400 args := map[string]interface{}{}
11401
11402 // Instantiate new minio client object
11403 c, err := minio.New(os.Getenv(serverEndpoint),
11404 &minio.Options{
11405 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11406 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11407 })
11408 if err != nil {
11409 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
11410 return
11411 }
11412
11413 testComposeObjectErrorCasesWrapper(c)
11414}
11415
11416// Test concatenating multiple 10K objects V4
11417func testCompose10KSources() {
11418 // initialize logging params
11419 startTime := time.Now()
11420 testName := getFuncName()
11421 function := "ComposeObject(destination, sourceList)"
11422 args := map[string]interface{}{}
11423
11424 // Instantiate new minio client object
11425 c, err := minio.New(os.Getenv(serverEndpoint),
11426 &minio.Options{
11427 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11428 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11429 })
11430 if err != nil {
11431 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
11432 return
11433 }
11434
11435 testComposeMultipleSources(c)
11436}
11437
11438// Tests comprehensive list of all methods.
11439func testFunctionalV2() {
11440 // initialize logging params
11441 startTime := time.Now()
11442 testName := getFuncName()
11443 function := "testFunctionalV2()"
11444 functionAll := ""
11445 args := map[string]interface{}{}
11446
11447 // Seed random based on current time.
11448 rand.Seed(time.Now().Unix())
11449
11450 c, err := minio.New(os.Getenv(serverEndpoint),
11451 &minio.Options{
11452 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11453 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11454 })
11455 if err != nil {
11456 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11457 return
11458 }
11459
11460 // Enable to debug
11461 // c.TraceOn(os.Stderr)
11462
11463 // Set user agent.
11464 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11465
11466 // Generate a new random bucket name.
11467 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11468 location := "us-east-1"
11469 // Make a new bucket.
11470 function = "MakeBucket(bucketName, location)"
11471 functionAll = "MakeBucket(bucketName, location)"
11472 args = map[string]interface{}{
11473 "bucketName": bucketName,
11474 "location": location,
11475 }
11476 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
11477 if err != nil {
11478 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11479 return
11480 }
11481
11482 defer cleanupBucket(bucketName, c)
11483
11484 // Generate a random file name.
11485 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
11486 file, err := os.Create(fileName)
11487 if err != nil {
11488 logError(testName, function, args, startTime, "", "file create failed", err)
11489 return
11490 }
11491 for i := 0; i < 3; i++ {
11492 buf := make([]byte, rand.Intn(1<<19))
11493 _, err = file.Write(buf)
11494 if err != nil {
11495 logError(testName, function, args, startTime, "", "file write failed", err)
11496 return
11497 }
11498 }
11499 file.Close()
11500
11501 // Verify that the bucket exists and you have access.
11502 var exists bool
11503 function = "BucketExists(bucketName)"
11504 functionAll += ", " + function
11505 args = map[string]interface{}{
11506 "bucketName": bucketName,
11507 }
11508 exists, err = c.BucketExists(context.Background(), bucketName)
11509 if err != nil {
11510 logError(testName, function, args, startTime, "", "BucketExists failed", err)
11511 return
11512 }
11513 if !exists {
11514 logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
11515 return
11516 }
11517
11518 // Make the bucket 'public read/write'.
11519 function = "SetBucketPolicy(bucketName, bucketPolicy)"
11520 functionAll += ", " + function
11521
11522 readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
11523
11524 args = map[string]interface{}{
11525 "bucketName": bucketName,
11526 "bucketPolicy": readWritePolicy,
11527 }
11528 err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
11529
11530 if err != nil {
11531 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
11532 return
11533 }
11534
11535 // List all buckets.
11536 function = "ListBuckets()"
11537 functionAll += ", " + function
11538 args = nil
11539 buckets, err := c.ListBuckets(context.Background())
11540 if err != nil {
11541 logError(testName, function, args, startTime, "", "ListBuckets failed", err)
11542 return
11543 }
11544 if len(buckets) == 0 {
11545 logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
11546 return
11547 }
11548
11549 // Verify if previously created bucket is listed in list buckets.
11550 bucketFound := false
11551 for _, bucket := range buckets {
11552 if bucket.Name == bucketName {
11553 bucketFound = true
11554 }
11555 }
11556
11557 // If bucket not found error out.
11558 if !bucketFound {
11559 logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
11560 return
11561 }
11562
11563 objectName := bucketName + "unique"
11564
11565 // Generate data
11566 buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
11567
11568 args = map[string]interface{}{
11569 "bucketName": bucketName,
11570 "objectName": objectName,
11571 "contentType": "",
11572 }
11573 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
11574 if err != nil {
11575 logError(testName, function, args, startTime, "", "PutObject failed", err)
11576 return
11577 }
11578
11579 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11580 if err != nil {
11581 logError(testName, function, args, startTime, "", "StatObject failed", err)
11582 return
11583 }
11584 if st.Size != int64(len(buf)) {
11585 logError(testName, function, args, startTime, "", "Expected uploaded object length "+fmt.Sprintf("%d", len(buf))+" got "+fmt.Sprintf("%d", st.Size), err)
11586 return
11587 }
11588
11589 objectNameNoLength := objectName + "-nolength"
11590 args["objectName"] = objectNameNoLength
11591 _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
11592 if err != nil {
11593 logError(testName, function, args, startTime, "", "PutObject failed", err)
11594 return
11595 }
11596 st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
11597 if err != nil {
11598 logError(testName, function, args, startTime, "", "StatObject failed", err)
11599 return
11600 }
11601 if st.Size != int64(len(buf)) {
11602 logError(testName, function, args, startTime, "", "Expected uploaded object length "+fmt.Sprintf("%d", len(buf))+" got "+fmt.Sprintf("%d", st.Size), err)
11603 return
11604 }
11605
11606 // Instantiate a done channel to close all listing.
11607 doneCh := make(chan struct{})
11608 defer close(doneCh)
11609
11610 objFound := false
11611 isRecursive := true // Recursive is true.
11612 function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
11613 functionAll += ", " + function
11614 args = map[string]interface{}{
11615 "bucketName": bucketName,
11616 "objectName": objectName,
11617 "isRecursive": isRecursive,
11618 }
11619 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) {
11620 if obj.Key == objectName {
11621 objFound = true
11622 break
11623 }
11624 }
11625 if !objFound {
11626 logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
11627 return
11628 }
11629
11630 incompObjNotFound := true
11631 function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
11632 functionAll += ", " + function
11633 args = map[string]interface{}{
11634 "bucketName": bucketName,
11635 "objectName": objectName,
11636 "isRecursive": isRecursive,
11637 }
11638 for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
11639 if objIncompl.Key != "" {
11640 incompObjNotFound = false
11641 break
11642 }
11643 }
11644 if !incompObjNotFound {
11645 logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
11646 return
11647 }
11648
11649 function = "GetObject(bucketName, objectName)"
11650 functionAll += ", " + function
11651 args = map[string]interface{}{
11652 "bucketName": bucketName,
11653 "objectName": objectName,
11654 }
11655 newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
11656 if err != nil {
11657 logError(testName, function, args, startTime, "", "GetObject failed", err)
11658 return
11659 }
11660
11661 newReadBytes, err := io.ReadAll(newReader)
11662 if err != nil {
11663 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11664 return
11665 }
11666 newReader.Close()
11667
11668 if !bytes.Equal(newReadBytes, buf) {
11669 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11670 return
11671 }
11672
11673 function = "FGetObject(bucketName, objectName, fileName)"
11674 functionAll += ", " + function
11675 args = map[string]interface{}{
11676 "bucketName": bucketName,
11677 "objectName": objectName,
11678 "fileName": fileName + "-f",
11679 }
11680 err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
11681 if err != nil {
11682 logError(testName, function, args, startTime, "", "FgetObject failed", err)
11683 return
11684 }
11685
11686 // Generate presigned HEAD object url.
11687 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
11688 functionAll += ", " + function
11689 args = map[string]interface{}{
11690 "bucketName": bucketName,
11691 "objectName": objectName,
11692 "expires": 3600 * time.Second,
11693 }
11694 presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
11695 if err != nil {
11696 logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
11697 return
11698 }
11699
11700 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
11701 if err != nil {
11702 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
11703 return
11704 }
11705
11706 httpClient := &http.Client{
11707 // Set a sensible timeout of 30 seconds to wait for response
11708 // headers; the request is proactively canceled after 30 seconds
11709 // if no response has arrived.
11710 Timeout: 30 * time.Second,
11711 Transport: transport,
11712 }
11713
11714 req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
11715 if err != nil {
11716 logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
11717 return
11718 }
11719
11720 // Verify if presigned url works.
11721 resp, err := httpClient.Do(req)
11722 if err != nil {
11723 logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
11724 return
11725 }
11726 if resp.StatusCode != http.StatusOK {
11727 logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+fmt.Sprintf("%d", resp.StatusCode), err)
11728 return
11729 }
11730 if resp.Header.Get("ETag") == "" {
11731 logError(testName, function, args, startTime, "", "Got empty ETag", err)
11732 return
11733 }
11734 resp.Body.Close()
11735
11736 // Generate presigned GET object url.
11737 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
11738 functionAll += ", " + function
11739 args = map[string]interface{}{
11740 "bucketName": bucketName,
11741 "objectName": objectName,
11742 "expires": 3600 * time.Second,
11743 }
11744 presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
11745 if err != nil {
11746 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
11747 return
11748 }
11749
11750 // Verify if presigned url works.
11751 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
11752 if err != nil {
11753 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
11754 return
11755 }
11756
11757 resp, err = httpClient.Do(req)
11758 if err != nil {
11759 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
11760 return
11761 }
11762
11763 if resp.StatusCode != http.StatusOK {
11764 logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+fmt.Sprintf("%d", resp.StatusCode), err)
11765 return
11766 }
11767 newPresignedBytes, err := io.ReadAll(resp.Body)
11768 if err != nil {
11769 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11770 return
11771 }
11772 resp.Body.Close()
11773 if !bytes.Equal(newPresignedBytes, buf) {
11774 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11775 return
11776 }
11777
11778 // Set request parameters.
11779 reqParams := make(url.Values)
11780 reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
11781 // Generate presigned GET object url.
11782 args["reqParams"] = reqParams
11783 presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
11784 if err != nil {
11785 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
11786 return
11787 }
11788
11789 // Verify if presigned url works.
11790 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
11791 if err != nil {
11792 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
11793 return
11794 }
11795
11796 resp, err = httpClient.Do(req)
11797 if err != nil {
11798 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
11799 return
11800 }
11801
11802 if resp.StatusCode != http.StatusOK {
11803 logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+fmt.Sprintf("%d", resp.StatusCode), err)
11804 return
11805 }
11806 newPresignedBytes, err = io.ReadAll(resp.Body)
11807 if err != nil {
11808 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11809 return
11810 }
11811 if !bytes.Equal(newPresignedBytes, buf) {
11812 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11813 return
11814 }
11815 // Verify content disposition.
11816 if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
11817 logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
11818 return
11819 }
11820
11821 function = "PresignedPutObject(bucketName, objectName, expires)"
11822 functionAll += ", " + function
11823 args = map[string]interface{}{
11824 "bucketName": bucketName,
11825 "objectName": objectName + "-presigned",
11826 "expires": 3600 * time.Second,
11827 }
11828 presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
11829 if err != nil {
11830 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
11831 return
11832 }
11833
11834 // Generate more than 32 KiB of data
11835 buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
11836
11837 req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
11838 if err != nil {
11839 logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
11840 return
11841 }
11842
11843 resp, err = httpClient.Do(req)
11844 if err != nil {
11845 logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
11846 return
11847 }
11848
11849 // Download the uploaded object to verify
11850 args = map[string]interface{}{
11851 "bucketName": bucketName,
11852 "objectName": objectName + "-presigned",
11853 }
11854 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
11855 if err != nil {
11856 logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err)
11857 return
11858 }
11859
11860 newReadBytes, err = io.ReadAll(newReader)
11861 if err != nil {
11862 logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
11863 return
11864 }
11865 newReader.Close()
11866
11867 if !bytes.Equal(newReadBytes, buf) {
11868 logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err)
11869 return
11870 }
11871
11872 function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
11873 functionAll += ", " + function
11874 presignExtraHeaders := map[string][]string{
11875 "mysecret": {"abcxxx"},
11876 }
11877 args = map[string]interface{}{
11878 "method": "PUT",
11879 "bucketName": bucketName,
11880 "objectName": objectName + "-presign-custom",
11881 "expires": 3600 * time.Second,
11882 "extraHeaders": presignExtraHeaders,
11883 }
11884 _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
11885 if err == nil {
11886 logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err)
11887 return
11888 }
11889
11890 os.Remove(fileName)
11891 os.Remove(fileName + "-f")
11892 successLogger(testName, functionAll, args, startTime).Info()
11893}
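// A minimal sketch of the presigned-GET verification pattern used above,
// assuming u came from PresignedGetObject; the 30-second timeout is
// illustrative.
func sketchCheckPresignedGet(u *url.URL) ([]byte, error) {
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(u.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}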
11894
11895// Test get object with GetObject with context
11896func testGetObjectContext() {
11897 // initialize logging params
11898 startTime := time.Now()
11899 testName := getFuncName()
11900 function := "GetObject(ctx, bucketName, objectName)"
11901 args := map[string]interface{}{
11902 "ctx": "",
11903 "bucketName": "",
11904 "objectName": "",
11905 }
11906 // Seed random based on current time.
11907 rand.Seed(time.Now().Unix())
11908
11909 // Instantiate new minio client object.
11910 c, err := minio.New(os.Getenv(serverEndpoint),
11911 &minio.Options{
11912 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11913 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11914 })
11915 if err != nil {
11916 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
11917 return
11918 }
11919
11920 // Enable tracing, write to stderr.
11921 // c.TraceOn(os.Stderr)
11922
11923 // Set user agent.
11924 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11925
11926 // Generate a new random bucket name.
11927 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11928 args["bucketName"] = bucketName
11929
11930 // Make a new bucket.
11931 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11932 if err != nil {
11933 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11934 return
11935 }
11936
11937 defer cleanupBucket(bucketName, c)
11938
11939 bufSize := dataFileMap["datafile-33-kB"]
11940 reader := getDataReader("datafile-33-kB")
11941 defer reader.Close()
11942 // Save the data
11943 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
11944 args["objectName"] = objectName
11945
11946 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
11947 if err != nil {
11948 logError(testName, function, args, startTime, "", "PutObject failed", err)
11949 return
11950 }
11951
11952 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
11953 args["ctx"] = ctx
11954 cancel()
11955
11956 r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
11957 if err != nil {
11958 logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
11959 return
11960 }
11961
11962 if _, err = r.Stat(); err == nil {
11963 logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
11964 return
11965 }
11966 r.Close()
11967
11968 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
11969 args["ctx"] = ctx
11970 defer cancel()
11971
11972 // Read the data back
11973 r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
11974 if err != nil {
11975 logError(testName, function, args, startTime, "", "GetObject failed", err)
11976 return
11977 }
11978
11979 st, err := r.Stat()
11980 if err != nil {
11981 logError(testName, function, args, startTime, "", "object Stat call failed", err)
11982 return
11983 }
11984 if st.Size != int64(bufSize) {
11985 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+fmt.Sprintf("%d", bufSize)+", got "+fmt.Sprintf("%d", st.Size), err)
11986 return
11987 }
11988 if err := r.Close(); err != nil {
11989 logError(testName, function, args, startTime, "", "object Close() call failed", err)
11990 return
11991 }
11992
11993 successLogger(testName, function, args, startTime).Info()
11994}
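// A minimal sketch of the cancellation behavior tested above: GetObject is
// lazy, so a canceled or expired context surfaces only when the object is
// first read or Stat'ed. The timeout value here is illustrative.
func sketchGetWithDeadline(c *minio.Client, bucket, object string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	r, err := c.GetObject(ctx, bucket, object, minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	defer r.Close()
	_, err = r.Stat() // network I/O happens here and honors ctx
	return err
}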
11995
11996// Test get object with FGetObject with a user provided context
11997func testFGetObjectContext() {
11998 // initialize logging params
11999 startTime := time.Now()
12000 testName := getFuncName()
12001 function := "FGetObject(ctx, bucketName, objectName, fileName)"
12002 args := map[string]interface{}{
12003 "ctx": "",
12004 "bucketName": "",
12005 "objectName": "",
12006 "fileName": "",
12007 }
12008 // Seed random based on current time.
12009 rand.Seed(time.Now().Unix())
12010
12011 // Instantiate new minio client object.
12012 c, err := minio.New(os.Getenv(serverEndpoint),
12013 &minio.Options{
12014 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12015 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12016 })
12017 if err != nil {
12018 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12019 return
12020 }
12021
12022 // Enable tracing, write to stderr.
12023 // c.TraceOn(os.Stderr)
12024
12025 // Set user agent.
12026 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12027
12028 // Generate a new random bucket name.
12029 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12030 args["bucketName"] = bucketName
12031
12032 // Make a new bucket.
12033 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12034 if err != nil {
12035 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12036 return
12037 }
12038
12039 defer cleanupBucket(bucketName, c)
12040
12041 bufSize := dataFileMap["datafile-1-MB"]
12042 reader := getDataReader("datafile-1-MB")
12043 defer reader.Close()
12044 // Save the data
12045 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12046 args["objectName"] = objectName
12047
12048 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12049 if err != nil {
12050 logError(testName, function, args, startTime, "", "PutObject failed", err)
12051 return
12052 }
12053
12054 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12055 args["ctx"] = ctx
12056 defer cancel()
12057
12058 fileName := "tempfile-context"
12059 args["fileName"] = fileName
12060 // Read the data back
12061 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
12062 if err == nil {
12063 logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
12064 return
12065 }
12066 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12067 defer cancel()
12068
12069 // Read the data back
12070 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
12071 if err != nil {
12072 logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
12073 return
12074 }
12075 if err = os.Remove(fileName + "-fcontext"); err != nil {
12076 logError(testName, function, args, startTime, "", "Remove file failed", err)
12077 return
12078 }
12079
12080 successLogger(testName, function, args, startTime).Info()
12081}
12082
12083// Test get object with GetObject with a user provided context
12084func testGetObjectRanges() {
12085 // initialize logging params
12086 startTime := time.Now()
12087 testName := getFuncName()
12088 function := "GetObject(ctx, bucketName, objectName, fileName)"
12089 args := map[string]interface{}{
12090 "ctx": "",
12091 "bucketName": "",
12092 "objectName": "",
12093 "fileName": "",
12094 }
12095 ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
12096 defer cancel()
12097
12098 rng := rand.NewSource(time.Now().UnixNano())
12099 // Instantiate new minio client object.
12100 c, err := minio.New(os.Getenv(serverEndpoint),
12101 &minio.Options{
12102 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12103 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12104 })
12105 if err != nil {
12106 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12107 return
12108 }
12109
12110 // Enable tracing, write to stderr.
12111 // c.TraceOn(os.Stderr)
12112
12113 // Set user agent.
12114 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12115
12116 // Generate a new random bucket name.
12117 bucketName := randString(60, rng, "minio-go-test-")
12118 args["bucketName"] = bucketName
12119
12120 // Make a new bucket.
12121 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12122 if err != nil {
12123 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12124 return
12125 }
12126
12127 defer cleanupBucket(bucketName, c)
12128
12129 bufSize := dataFileMap["datafile-129-MB"]
12130 reader := getDataReader("datafile-129-MB")
12131 defer reader.Close()
12132 // Save the data
12133 objectName := randString(60, rng, "")
12134 args["objectName"] = objectName
12135
12136 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12137 if err != nil {
12138 logError(testName, function, args, startTime, "", "PutObject failed", err)
12139 return
12140 }
12141
12142 // Read the data back
12143 tests := []struct {
12144 start int64
12145 end int64
12146 }{
12147 {
12148 start: 1024,
12149 end: 1024 + 1<<20,
12150 },
12151 {
12152 start: 20e6,
12153 end: 20e6 + 10000,
12154 },
12155 {
12156 start: 40e6,
12157 end: 40e6 + 10000,
12158 },
12159 {
12160 start: 60e6,
12161 end: 60e6 + 10000,
12162 },
12163 {
12164 start: 80e6,
12165 end: 80e6 + 10000,
12166 },
12167 {
12168 start: 120e6,
12169 end: int64(bufSize),
12170 },
12171 }
12172 for _, test := range tests {
12173 wantRC := getDataReader("datafile-129-MB")
12174 io.CopyN(io.Discard, wantRC, test.start)
12175 want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
12176 opts := minio.GetObjectOptions{}
12177 opts.SetRange(test.start, test.end)
12178 args["opts"] = fmt.Sprintf("%+v", test)
12179 obj, err := c.GetObject(ctx, bucketName, objectName, opts)
12180 if err != nil {
12181 logError(testName, function, args, startTime, "", "GetObject with range failed", err)
12182 return
12183 }
12184 err = crcMatches(obj, want)
12185 if err != nil {
12186 logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
12187 return
12188 }
12189 }
12190
12191 successLogger(testName, function, args, startTime).Info()
12192}
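// A minimal sketch of a ranged GET, as exercised above. SetRange(start, end)
// is inclusive on both ends, so it yields end-start+1 bytes; the offsets
// below are illustrative.
func sketchRangedRead(c *minio.Client, bucket, object string) ([]byte, error) {
	opts := minio.GetObjectOptions{}
	opts.SetRange(1024, 2047) // the second KiB of the object
	r, err := c.GetObject(context.Background(), bucket, object, opts)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}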
12193
12194// Test get object ACLs with GetObjectACL with custom provided context
12195func testGetObjectACLContext() {
12196 // initialize logging params
12197 startTime := time.Now()
12198 testName := getFuncName()
12199 function := "GetObjectACL(ctx, bucketName, objectName)"
12200 args := map[string]interface{}{
12201 "ctx": "",
12202 "bucketName": "",
12203 "objectName": "",
12204 }
12205 // Seed random based on current time.
12206 rand.Seed(time.Now().Unix())
12207
12208 // Instantiate new minio client object.
12209 c, err := minio.New(os.Getenv(serverEndpoint),
12210 &minio.Options{
12211 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12212 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12213 })
12214 if err != nil {
12215 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12216 return
12217 }
12218
12219 // Enable tracing, write to stderr.
12220 // c.TraceOn(os.Stderr)
12221
12222 // Set user agent.
12223 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12224
12225 // Generate a new random bucket name.
12226 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12227 args["bucketName"] = bucketName
12228
12229 // Make a new bucket.
12230 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12231 if err != nil {
12232 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12233 return
12234 }
12235
12236 defer cleanupBucket(bucketName, c)
12237
12238 bufSize := dataFileMap["datafile-1-MB"]
12239 reader := getDataReader("datafile-1-MB")
12240 defer reader.Close()
12241 // Save the data
12242 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12243 args["objectName"] = objectName
12244
12245 // Add meta data to add a canned acl
12246 metaData := map[string]string{
12247 "X-Amz-Acl": "public-read-write",
12248 }
12249
12250 _, err = c.PutObject(context.Background(), bucketName,
12251 objectName, reader, int64(bufSize),
12252 minio.PutObjectOptions{
12253 ContentType: "binary/octet-stream",
12254 UserMetadata: metaData,
12255 })
12256
12257 if err != nil {
12258 logError(testName, function, args, startTime, "", "PutObject failed", err)
12259 return
12260 }
12261
12262 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
12263 args["ctx"] = ctx
12264 defer cancel()
12265
12266 // Read the data back
12267 objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
12268 if getObjectACLErr != nil {
12269 logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr)
12270 return
12271 }
12272
12273 s, ok := objectInfo.Metadata["X-Amz-Acl"]
12274 if !ok {
12275 logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
12276 return
12277 }
12278
12279 if len(s) != 1 {
12280 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
12281 return
12282 }
12283
12284 // Do only very limited testing if this is not AWS S3
12285 if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
12286 if s[0] != "private" {
12287 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil)
12288 return
12289 }
12290
12291 successLogger(testName, function, args, startTime).Info()
12292 return
12293 }
12294
12295 if s[0] != "public-read-write" {
12296 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
12297 return
12298 }
12299
12300 bufSize = dataFileMap["datafile-1-MB"]
12301 reader2 := getDataReader("datafile-1-MB")
12302 defer reader2.Close()
12303 // Save the data
12304 objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
12305 args["objectName"] = objectName
12306
12307 // Add meta data to add a canned acl
12308 metaData = map[string]string{
12309 "X-Amz-Grant-Read": "[email protected]",
12310 "X-Amz-Grant-Write": "[email protected]",
12311 }
12312
12313 _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
12314 if err != nil {
12315 logError(testName, function, args, startTime, "", "PutObject failed", err)
12316 return
12317 }
12318
12319 ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
12320 args["ctx"] = ctx
12321 defer cancel()
12322
12323 // Read the data back
12324 objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
12325 if getObjectACLErr != nil {
12326 logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
12327 return
12328 }
12329
12330 if len(objectInfo.Metadata) != 3 {
12331 logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
12332 return
12333 }
12334
12335 s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
12336 if !ok {
12337 logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
12338 return
12339 }
12340
12341 if len(s) != 1 {
12342 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
12343 return
12344 }
12345
12346 if s[0] != "[email protected]" {
12347 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil)
12348 return
12349 }
12350
12351 s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
12352 if !ok {
12353 logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil)
12354 return
12355 }
12356
12357 if len(s) != 1 {
12358 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
12359 return
12360 }
12361
12362 if s[0] != "[email protected]" {
12363 logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil)
12364 return
12365 }
12366
12367 successLogger(testName, function, args, startTime).Info()
12368}
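// A minimal sketch of reading back a canned ACL, as the test above does: the
// value arrives in the object metadata under "X-Amz-Acl" on backends that
// support object ACLs. Bucket and object names are caller-supplied.
func sketchReadACL(c *minio.Client, bucket, object string) (string, error) {
	info, err := c.GetObjectACL(context.Background(), bucket, object)
	if err != nil {
		return "", err
	}
	return info.Metadata.Get("X-Amz-Acl"), nil
}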
12369
12370// Test validates putObject with context to see if request cancellation is honored for V2.
12371func testPutObjectContextV2() {
12372 // initialize logging params
12373 startTime := time.Now()
12374 testName := getFuncName()
12375 function := "PutObject(ctx, bucketName, objectName, reader, size, opts)"
12376 args := map[string]interface{}{
12377 "ctx": "",
12378 "bucketName": "",
12379 "objectName": "",
12380 "size": "",
12381 "opts": "",
12382 }
12383 // Instantiate new minio client object.
12384 c, err := minio.New(os.Getenv(serverEndpoint),
12385 &minio.Options{
12386 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12387 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12388 })
12389 if err != nil {
12390 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12391 return
12392 }
12393
12394 // Enable tracing, write to stderr.
12395 // c.TraceOn(os.Stderr)
12396
12397 // Set user agent.
12398 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12399
12400 // Make a new bucket.
12401 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12402 args["bucketName"] = bucketName
12403
12404 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12405 if err != nil {
12406 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12407 return
12408 }
12409 defer cleanupBucket(bucketName, c)
12410 bufSize := dataFileMap["datafile-33-kB"]
12411 reader := getDataReader("datafile-33-kB")
12412 defer reader.Close()
12413
12414 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
12415 args["objectName"] = objectName
12416
12417 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
12418 args["ctx"] = ctx
12419 args["size"] = bufSize
12420 defer cancel()
12421
12422 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12423 if err != nil {
12424 logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
12425 return
12426 }
12427
12428 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12429 args["ctx"] = ctx
12430
12431 defer cancel()
12432 reader = getDataReader("datafile-33-kB")
12433 defer reader.Close()
12434 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12435 if err != nil {
12436 logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
12437 return
12438 }
12439
12440 successLogger(testName, function, args, startTime).Info()
12441}
12442
12443// Test get object with GetObject with custom context
12444func testGetObjectContextV2() {
12445 // initialize logging params
12446 startTime := time.Now()
12447 testName := getFuncName()
12448 function := "GetObject(ctx, bucketName, objectName)"
12449 args := map[string]interface{}{
12450 "ctx": "",
12451 "bucketName": "",
12452 "objectName": "",
12453 }
12454 // Seed random based on current time.
12455 rand.Seed(time.Now().Unix())
12456
12457 // Instantiate new minio client object.
12458 c, err := minio.New(os.Getenv(serverEndpoint),
12459 &minio.Options{
12460 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12461 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12462 })
12463 if err != nil {
12464 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12465 return
12466 }
12467
12468 // Enable tracing, write to stderr.
12469 // c.TraceOn(os.Stderr)
12470
12471 // Set user agent.
12472 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12473
12474 // Generate a new random bucket name.
12475 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12476 args["bucketName"] = bucketName
12477
12478 // Make a new bucket.
12479 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12480 if err != nil {
12481 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12482 return
12483 }
12484
12485 defer cleanupBucket(bucketName, c)
12486
12487 bufSize := dataFileMap["datafile-33-kB"]
12488 reader := getDataReader("datafile-33-kB")
12489 defer reader.Close()
12490 // Save the data
12491 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12492 args["objectName"] = objectName
12493
12494 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12495 if err != nil {
12496 logError(testName, function, args, startTime, "", "PutObject call failed", err)
12497 return
12498 }
12499
12500 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12501 args["ctx"] = ctx
12502 cancel()
12503
12504 r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
12505 if err != nil {
12506 logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
12507 return
12508 }
12509 if _, err = r.Stat(); err == nil {
12510 logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
12511 return
12512 }
12513 r.Close()
12514
12515 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12516 defer cancel()
12517
12518 // Read the data back
12519 r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
12520 if err != nil {
12521 logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
12522 return
12523 }
12524
12525 st, err := r.Stat()
12526 if err != nil {
12527 logError(testName, function, args, startTime, "", "object Stat call failed", err)
12528 return
12529 }
12530 if st.Size != int64(bufSize) {
12531 logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
12532 return
12533 }
12534 if err := r.Close(); err != nil {
12535 logError(testName, function, args, startTime, "", " object Close() call failed", err)
12536 return
12537 }
12538
12539 successLogger(testName, function, args, startTime).Info()
12540}
12541
12542// Test FGetObject with a custom context (V2 client)
12543func testFGetObjectContextV2() {
12544 // initialize logging params
12545 startTime := time.Now()
12546 testName := getFuncName()
12547 function := "FGetObject(ctx, bucketName, objectName,fileName)"
12548 args := map[string]interface{}{
12549 "ctx": "",
12550 "bucketName": "",
12551 "objectName": "",
12552 "fileName": "",
12553 }
12554 // Seed random based on current time.
12555 rand.Seed(time.Now().Unix())
12556
12557 // Instantiate new minio client object.
12558 c, err := minio.New(os.Getenv(serverEndpoint),
12559 &minio.Options{
12560 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12561 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12562 })
12563 if err != nil {
12564 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12565 return
12566 }
12567
12568 // Enable tracing, write to stderr.
12569 // c.TraceOn(os.Stderr)
12570
12571 // Set user agent.
12572 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12573
12574 // Generate a new random bucket name.
12575 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12576 args["bucketName"] = bucketName
12577
12578 // Make a new bucket.
12579 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12580 if err != nil {
12581 logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
12582 return
12583 }
12584
12585 defer cleanupBucket(bucketName, c)
12586
12587 bufSize := dataFileMap["datafile-1-MB"]
12588 reader := getDataReader("datafile-1-MB")
12589 defer reader.Close()
12590 // Save the data
12591 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12592 args["objectName"] = objectName
12593
12594 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12595 if err != nil {
12596 logError(testName, function, args, startTime, "", "PutObject call failed", err)
12597 return
12598 }
12599
12600 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12601 args["ctx"] = ctx
12602 defer cancel()
12603
12604 fileName := "tempfile-context"
12605 args["fileName"] = fileName
12606
12607 // Read the data back
12608 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
12609 if err == nil {
12610 logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
12611 return
12612 }
12613 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12614 defer cancel()
12615
12616 // Read the data back
12617 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
12618 if err != nil {
12619 logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
12620 return
12621 }
12622
12623 if err = os.Remove(fileName + "-fcontext"); err != nil {
12624 logError(testName, function, args, startTime, "", "Remove file failed", err)
12625 return
12626 }
12627
12628 successLogger(testName, function, args, startTime).Info()
12629}
12630
12631// Test listing objects with ListObjects, both V1 and V2
12632func testListObjects() {
12633 // initialize logging params
12634 startTime := time.Now()
12635 testName := getFuncName()
12636 function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
12637 args := map[string]interface{}{
12638 "bucketName": "",
12639 "objectPrefix": "",
12640 "recursive": "true",
12641 }
12642 // Seed random based on current time.
12643 rand.Seed(time.Now().Unix())
12644
12645 // Instantiate new minio client object.
12646 c, err := minio.New(os.Getenv(serverEndpoint),
12647 &minio.Options{
12648 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12649 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12650 })
12651 if err != nil {
12652 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12653 return
12654 }
12655
12656 // Enable tracing, write to stderr.
12657 // c.TraceOn(os.Stderr)
12658
12659 // Set user agent.
12660 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12661
12662 // Generate a new random bucket name.
12663 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12664 args["bucketName"] = bucketName
12665
12666 // Make a new bucket.
12667 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12668 if err != nil {
12669 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12670 return
12671 }
12672
12673 defer cleanupBucket(bucketName, c)
12674
12675 testObjects := []struct {
12676 name string
12677 storageClass string
12678 }{
12679 // Special characters
12680 {"foo bar", "STANDARD"},
12681 {"foo-%", "STANDARD"},
12682 {"random-object-1", "STANDARD"},
12683 {"random-object-2", "REDUCED_REDUNDANCY"},
12684 }
12685
12686 for i, object := range testObjects {
12687 bufSize := dataFileMap["datafile-33-kB"]
12688 reader := getDataReader("datafile-33-kB")
12689 defer reader.Close()
12690 _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
12691 minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
12692 if err != nil {
12693 logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
12694 return
12695 }
12696 }
12697
12698 testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
12699 var objCursor int
12700
12701 // check for object name and storage-class from listing object result
12702 for objInfo := range listFn(context.Background(), bucket, opts) {
12703 if objInfo.Err != nil {
12704 logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
12705 return
12706 }
12707 if objInfo.Key != testObjects[objCursor].name {
12708 logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", nil)
12709 return
12710 }
12711 if objInfo.StorageClass != testObjects[objCursor].storageClass {
12712 // Ignored, as gateways (Azure/GCS etc.) won't return the storage class
12713 ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
12714 }
12715 objCursor++
12716 }
12717
12718 if objCursor != len(testObjects) {
12719 logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("count mismatch"))
12720 return
12721 }
12722 }
12723
12724 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
12725 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
12726 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
12727
12728 successLogger(testName, function, args, startTime).Info()
12729}
12730
12731// Test deleting multiple objects with object retention set in Governance mode
12732func testRemoveObjects() {
12733 // initialize logging params
12734 startTime := time.Now()
12735 testName := getFuncName()
12736 function := "RemoveObjects(bucketName, objectsCh, opts)"
12737 args := map[string]interface{}{
12738 "bucketName": "",
12739 "objectPrefix": "",
12740 "recursive": "true",
12741 }
12742 // Seed random based on current time.
12743 rand.Seed(time.Now().Unix())
12744
12745 // Instantiate new minio client object.
12746 c, err := minio.New(os.Getenv(serverEndpoint),
12747 &minio.Options{
12748 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12749 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12750 })
12751 if err != nil {
12752 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12753 return
12754 }
12755
12756 // Enable tracing, write to stderr.
12757 // c.TraceOn(os.Stderr)
12758
12759 // Set user agent.
12760 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12761
12762 // Generate a new random bucket name.
12763 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12764 args["bucketName"] = bucketName
12765 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12766 args["objectName"] = objectName
12767
12768 // Make a new bucket.
12769 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
12770 if err != nil {
12771 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12772 return
12773 }
12774
12775 bufSize := dataFileMap["datafile-129-MB"]
12776 reader := getDataReader("datafile-129-MB")
12777 defer reader.Close()
12778
12779 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
12780 if err != nil {
12781 logError(testName, function, args, startTime, "", "Error uploading object", err)
12782 return
12783 }
12784
12785 // Replace with smaller...
12786 bufSize = dataFileMap["datafile-10-kB"]
12787 reader = getDataReader("datafile-10-kB")
12788 defer reader.Close()
12789
12790 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
12791 if err != nil {
12792 logError(testName, function, args, startTime, "", "Error uploading object", err)
return
12793 }
12794
12795 t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
12796 m := minio.RetentionMode(minio.Governance)
12797 opts := minio.PutObjectRetentionOptions{
12798 GovernanceBypass: false,
12799 RetainUntilDate: &t,
12800 Mode: &m,
12801 }
12802 err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
12803 if err != nil {
12804 logError(testName, function, args, startTime, "", "Error setting retention", err)
12805 return
12806 }
12807
12808 objectsCh := make(chan minio.ObjectInfo)
12809 // Send object names that are needed to be removed to objectsCh
12810 go func() {
12811 defer close(objectsCh)
12812 // List all objects from a bucket-name with a matching prefix.
12813 for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
12814 if object.Err != nil {
12815 logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
12816 return
12817 }
12818 objectsCh <- object
12819 }
12820 }()
12821
12822 for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
12823 // Error is expected here because Retention is set on the object
12824 // and RemoveObjects is called without Bypass Governance
12825 if rErr.Err == nil {
12826 logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
12827 return
12828 }
12829 }
12830
12831 objectsCh1 := make(chan minio.ObjectInfo)
12832
12833 // Send object names that are needed to be removed to objectsCh
12834 go func() {
12835 defer close(objectsCh1)
12836 // List all objects from a bucket-name with a matching prefix.
12837 for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
12838 if object.Err != nil {
12839 logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
12840 return
12841 }
12842 objectsCh1 <- object
12843 }
12844 }()
12845
12846 opts1 := minio.RemoveObjectsOptions{
12847 GovernanceBypass: true,
12848 }
12849
12850 for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) {
12851 // Error is not expected here because Retention is set on the object
12852 // and RemoveObjects is called with Bypass Governance
12853 logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err)
12854 return
12855 }
12856
12857 // Delete all objects and buckets
12858 if err = cleanupVersionedBucket(bucketName, c); err != nil {
12859 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
12860 return
12861 }
12862
12863 successLogger(testName, function, args, startTime).Info()
12864}
12865
12866// mustParseBool converts a string to a bool, returning false on any parse error
12867func mustParseBool(str string) bool {
12868 b, err := strconv.ParseBool(str)
12869 if err != nil {
12870 return false
12871 }
12872 return b
12873}
12874
12875func main() {
12876 // Output to stdout instead of the default stderr
12877 log.SetOutput(os.Stdout)
12878 // create custom formatter
12879 mintFormatter := mintJSONFormatter{}
12880 // set custom formatter
12881 log.SetFormatter(&mintFormatter)
12882 // log Info or above -- success cases are Info level, failures are Fatal level
12883 log.SetLevel(log.InfoLevel)
12884
12885 tls := mustParseBool(os.Getenv(enableHTTPS))
12886 kms := mustParseBool(os.Getenv(enableKMS))
12887 if os.Getenv(enableKMS) == "" {
12888 // Default to KMS tests.
12889 kms = true
12890 }
12891
12892 // execute tests
12893 if isFullMode() {
12894 testMakeBucketErrorV2()
12895 testGetObjectClosedTwiceV2()
12896 testFPutObjectV2()
12897 testMakeBucketRegionsV2()
12898 testGetObjectReadSeekFunctionalV2()
12899 testGetObjectReadAtFunctionalV2()
12900 testGetObjectRanges()
12901 testCopyObjectV2()
12902 testFunctionalV2()
12903 testComposeObjectErrorCasesV2()
12904 testCompose10KSourcesV2()
12905 testUserMetadataCopyingV2()
12906 testPutObjectWithChecksums()
12907 testPutMultipartObjectWithChecksums()
12908 testPutObject0ByteV2()
12909 testPutObjectNoLengthV2()
12910 testPutObjectsUnknownV2()
12911 testGetObjectContextV2()
12912 testFPutObjectContextV2()
12913 testFGetObjectContextV2()
12914 testPutObjectContextV2()
12915 testPutObjectWithVersioning()
12916 testMakeBucketError()
12917 testMakeBucketRegions()
12918 testPutObjectWithMetadata()
12919 testPutObjectReadAt()
12920 testPutObjectStreaming()
12921 testGetObjectSeekEnd()
12922 testGetObjectClosedTwice()
12923 testGetObjectS3Zip()
12924 testRemoveMultipleObjects()
12925 testRemoveMultipleObjectsWithResult()
12926 testFPutObjectMultipart()
12927 testFPutObject()
12928 testGetObjectReadSeekFunctional()
12929 testGetObjectReadAtFunctional()
12930 testGetObjectReadAtWhenEOFWasReached()
12931 testPresignedPostPolicy()
12932 testCopyObject()
12933 testComposeObjectErrorCases()
12934 testCompose10KSources()
12935 testUserMetadataCopying()
12936 testBucketNotification()
12937 testFunctional()
12938 testGetObjectModified()
12939 testPutObjectUploadSeekedObject()
12940 testGetObjectContext()
12941 testFPutObjectContext()
12942 testFGetObjectContext()
12943 testGetObjectACLContext()
12944 testPutObjectContext()
12945 testStorageClassMetadataPutObject()
12946 testStorageClassInvalidMetadataPutObject()
12947 testStorageClassMetadataCopyObject()
12948 testPutObjectWithContentLanguage()
12949 testListObjects()
12950 testRemoveObjects()
12951 testListObjectVersions()
12952 testStatObjectWithVersioning()
12953 testGetObjectWithVersioning()
12954 testCopyObjectWithVersioning()
12955 testConcurrentCopyObjectWithVersioning()
12956 testComposeObjectWithVersioning()
12957 testRemoveObjectWithVersioning()
12958 testRemoveObjectsWithVersioning()
12959 testObjectTaggingWithVersioning()
12960 testTrailingChecksums()
12961 testPutObjectWithAutomaticChecksums()
12962
12963 // SSE-C tests will only work over TLS connection.
12964 if tls {
12965 testSSECEncryptionPutGet()
12966 testSSECEncryptionFPut()
12967 testSSECEncryptedGetObjectReadAtFunctional()
12968 testSSECEncryptedGetObjectReadSeekFunctional()
12969 testEncryptedCopyObjectV2()
12970 testEncryptedSSECToSSECCopyObject()
12971 testEncryptedSSECToUnencryptedCopyObject()
12972 testUnencryptedToSSECCopyObject()
12973 testUnencryptedToUnencryptedCopyObject()
12974 testEncryptedEmptyObject()
12975 testDecryptedCopyObject()
12976 testSSECEncryptedToSSECCopyObjectPart()
12977 testSSECMultipartEncryptedToSSECCopyObjectPart()
12978 testSSECEncryptedToUnencryptedCopyPart()
12979 testUnencryptedToSSECCopyObjectPart()
12980 testUnencryptedToUnencryptedCopyPart()
12981 testEncryptedSSECToSSES3CopyObject()
12982 testEncryptedSSES3ToSSECCopyObject()
12983 testSSECEncryptedToSSES3CopyObjectPart()
12984 testSSES3EncryptedToSSECCopyObjectPart()
12985 }
12986
12987 // KMS tests
12988 if kms {
12989 testSSES3EncryptionPutGet()
12990 testSSES3EncryptionFPut()
12991 testSSES3EncryptedGetObjectReadAtFunctional()
12992 testSSES3EncryptedGetObjectReadSeekFunctional()
12993 testEncryptedSSES3ToSSES3CopyObject()
12994 testEncryptedSSES3ToUnencryptedCopyObject()
12995 testUnencryptedToSSES3CopyObject()
12996 testUnencryptedToSSES3CopyObjectPart()
12997 testSSES3EncryptedToUnencryptedCopyPart()
12998 testSSES3EncryptedToSSES3CopyObjectPart()
12999 }
13000 } else {
13001 testFunctional()
13002 testFunctionalV2()
13003 }
13004}
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go
new file mode 100644
index 0000000..07bc7db
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go
@@ -0,0 +1,101 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "fmt"
22 "io"
23 "sync"
24)
25
26// hookReader hooks an additional reader into the source stream. It is
27// useful for implementing progress bars. The second reader is notified
28// of the exact number of bytes read from the primary source on each
29// Read operation.
30type hookReader struct {
31 mu sync.RWMutex
32 source io.Reader
33 hook io.Reader
34}
35
36// Seek implements io.Seeker. It seeks the source first and then, if the
37// hook also implements io.Seeker, seeks the hook to the same position.
38func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
39 hr.mu.Lock()
40 defer hr.mu.Unlock()
41
42 // If the source implements io.Seeker, use it.
43 sourceSeeker, ok := hr.source.(io.Seeker)
44 if ok {
45 n, err = sourceSeeker.Seek(offset, whence)
46 if err != nil {
47 return 0, err
48 }
49 }
50
51 if hr.hook != nil {
52 // If the hook also implements io.Seeker, keep it in sync.
53 hookSeeker, ok := hr.hook.(io.Seeker)
54 if ok {
55 var m int64
56 m, err = hookSeeker.Seek(offset, whence)
57 if err != nil {
58 return 0, err
59 }
60 if n != m {
61 return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
62 }
63 }
64 }
65
66 return n, nil
67}
68
69// Read implements io.Reader. It always reads from the source; the 'n'
70// bytes read are then mirrored to the hook. Any error other than io.EOF
71// is returned to the caller.
72func (hr *hookReader) Read(b []byte) (n int, err error) {
73 hr.mu.RLock()
74 defer hr.mu.RUnlock()
75
76 n, err = hr.source.Read(b)
77 if err != nil && err != io.EOF {
78 return n, err
79 }
80 if hr.hook != nil {
81 // Progress the hook with the total read bytes from the source.
82 if _, herr := hr.hook.Read(b[:n]); herr != nil {
83 if herr != io.EOF {
84 return n, herr
85 }
86 }
87 }
88 return n, err
89}
90
91// newHook returns an io.Reader which wraps the source in a hookReader
92// that reports the data read from the source to the hook.
93func newHook(source, hook io.Reader) io.Reader {
94 if hook == nil {
95 return &hookReader{source: source}
96 }
97 return &hookReader{
98 source: source,
99 hook: hook,
100 }
101}
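
A minimal sketch of how newHook can drive a progress bar from inside package minio; the byteCounter type below is hypothetical, and any io.Reader can serve as the hook.

// byteCounter is a hypothetical progress hook: it records how many
// bytes it has been shown on each Read.
type byteCounter struct{ n int64 }

func (b *byteCounter) Read(p []byte) (int, error) {
	b.n += int64(len(p))
	return len(p), nil
}

// progress := &byteCounter{}
// r := newHook(source, progress)
// // every Read on r mirrors the byte count into progress.n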
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 0000000..800c4a2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,242 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "crypto/sha256"
23 "encoding/hex"
24 "encoding/xml"
25 "errors"
26 "io"
27 "net/http"
28 "net/url"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/signer"
34)
35
36// AssumeRoleResponse contains the result of successful AssumeRole request.
37type AssumeRoleResponse struct {
38 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
39
40 Result AssumeRoleResult `xml:"AssumeRoleResult"`
41 ResponseMetadata struct {
42 RequestID string `xml:"RequestId,omitempty"`
43 } `xml:"ResponseMetadata,omitempty"`
44}
45
46// AssumeRoleResult - Contains the response to a successful AssumeRole
47// request, including temporary credentials that can be used to make
48// MinIO API requests.
49type AssumeRoleResult struct {
50 // The identifiers for the temporary security credentials that the operation
51 // returns.
52 AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
53
54 // The temporary security credentials, which include an access key ID, a secret
55 // access key, and a security (or session) token.
56 //
57 // Note: The size of the security token that STS APIs return is not fixed. We
58 // strongly recommend that you make no assumptions about the maximum size. As
59 // of this writing, the typical size is less than 4096 bytes, but that can vary.
60 // Also, future updates to AWS might require larger sizes.
61 Credentials struct {
62 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
63 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
64 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
65 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
66 } `xml:",omitempty"`
67
68 // A percentage value that indicates the size of the policy in packed form.
69 // The service rejects any policy with a packed size greater than 100 percent,
70 // which means the policy exceeded the allowed space.
71 PackedPolicySize int `xml:",omitempty"`
72}
73
74// An STSAssumeRole retrieves credentials from the MinIO STS service, and
75// keeps track of whether those credentials have expired.
76type STSAssumeRole struct {
77 Expiry
78
79 // Required http Client to use when connecting to MinIO STS service.
80 Client *http.Client
81
82 // STS endpoint to fetch STS credentials.
83 STSEndpoint string
84
85 // various options for this request.
86 Options STSAssumeRoleOptions
87}
88
89// STSAssumeRoleOptions collection of various input options
90// to obtain AssumeRole credentials.
91type STSAssumeRoleOptions struct {
92 // Mandatory inputs.
93 AccessKey string
94 SecretKey string
95
96 SessionToken string // Optional if the first request is made with temporary credentials.
97 Policy string // Optional to assign a policy to the assumed role
98
99 Location string // Optional, commonly needed with AWS STS.
100 DurationSeconds int // Optional, defaults to 1 hour.
101
102 // Optional, only valid when used with AWS STS.
103 RoleARN string
104 RoleSessionName string
105 ExternalID string
106}
107
108// NewSTSAssumeRole returns a pointer to a new
109// Credentials object wrapping the STSAssumeRole.
110func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
111 if stsEndpoint == "" {
112 return nil, errors.New("STS endpoint cannot be empty")
113 }
114 if opts.AccessKey == "" || opts.SecretKey == "" {
115 return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
116 }
117 return New(&STSAssumeRole{
118 Client: &http.Client{
119 Transport: http.DefaultTransport,
120 },
121 STSEndpoint: stsEndpoint,
122 Options: opts,
123 }), nil
124}
125
126const defaultDurationSeconds = 3600
127
128// closeResponse closes a non-nil response after draining any remaining
129// data from its Body; a convenience wrapper used after STS requests.
130//
131// This allows the Go http RoundTripper to re-use the same connection
132// for future requests.
133func closeResponse(resp *http.Response) {
134 // Callers should close resp.Body when done reading from it.
135 // If resp.Body is not closed, the Client's underlying RoundTripper
136 // (typically Transport) may not be able to re-use a persistent TCP
137 // connection to the server for a subsequent "keep-alive" request.
138 if resp != nil && resp.Body != nil {
139 // Drain any remaining Body and then close the connection.
140 // Without draining first, closing would prevent re-using the
141 // connection for future requests.
142 // - http://stackoverflow.com/a/17961593/4465767
143 io.Copy(io.Discard, resp.Body)
144 resp.Body.Close()
145 }
146}
147
148func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
149 v := url.Values{}
150 v.Set("Action", "AssumeRole")
151 v.Set("Version", STSVersion)
152 if opts.RoleARN != "" {
153 v.Set("RoleArn", opts.RoleARN)
154 }
155 if opts.RoleSessionName != "" {
156 v.Set("RoleSessionName", opts.RoleSessionName)
157 }
158 if opts.DurationSeconds > defaultDurationSeconds {
159 v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
160 } else {
161 v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
162 }
163 if opts.Policy != "" {
164 v.Set("Policy", opts.Policy)
165 }
166 if opts.ExternalID != "" {
167 v.Set("ExternalId", opts.ExternalID)
168 }
169
170 u, err := url.Parse(endpoint)
171 if err != nil {
172 return AssumeRoleResponse{}, err
173 }
174 u.Path = "/"
175
176 postBody := strings.NewReader(v.Encode())
177 hash := sha256.New()
178 if _, err = io.Copy(hash, postBody); err != nil {
179 return AssumeRoleResponse{}, err
180 }
181 postBody.Seek(0, io.SeekStart)
182
183 req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
184 if err != nil {
185 return AssumeRoleResponse{}, err
186 }
187 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
188 req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
189 if opts.SessionToken != "" {
190 req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
191 }
192 req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
193
194 resp, err := clnt.Do(req)
195 if err != nil {
196 return AssumeRoleResponse{}, err
197 }
198 defer closeResponse(resp)
199 if resp.StatusCode != http.StatusOK {
200 var errResp ErrorResponse
201 buf, err := io.ReadAll(resp.Body)
202 if err != nil {
203 return AssumeRoleResponse{}, err
204 }
205 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
206 if err != nil {
207 var s3Err Error
208 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
209 return AssumeRoleResponse{}, err
210 }
211 errResp.RequestID = s3Err.RequestID
212 errResp.STSError.Code = s3Err.Code
213 errResp.STSError.Message = s3Err.Message
214 }
215 return AssumeRoleResponse{}, errResp
216 }
217
218 a := AssumeRoleResponse{}
219 if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
220 return AssumeRoleResponse{}, err
221 }
222 return a, nil
223}
224
225// Retrieve retrieves credentials from the MinIO service.
226// Error will be returned if the request fails.
227func (m *STSAssumeRole) Retrieve() (Value, error) {
228 a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
229 if err != nil {
230 return Value{}, err
231 }
232
233 // Expire early: DefaultExpiryWindow marks the credentials expired at ~80% of their lifetime.
234 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
235
236 return Value{
237 AccessKeyID: a.Result.Credentials.AccessKey,
238 SecretAccessKey: a.Result.Credentials.SecretKey,
239 SessionToken: a.Result.Credentials.SessionToken,
240 SignerType: SignatureV4,
241 }, nil
242}
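
A minimal usage sketch for STSAssumeRole; the endpoint and keys are placeholders, while NewSTSAssumeRole and minio.New with Options follow the code above.

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder STS endpoint and static keys used to assume the role.
	creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
		AccessKey:       "ACCESS-KEY",
		SecretKey:       "SECRET-KEY",
		DurationSeconds: 3600,
	})
	if err != nil {
		log.Fatalln(err)
	}
	// The returned *Credentials refreshes itself near expiry via Retrieve.
	client, err := minio.New("s3.example.com", &minio.Options{Creds: creds, Secure: true})
	if err != nil {
		log.Fatalln(err)
	}
	_ = client // use client as usual
}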
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 0000000..ddccfb1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,88 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20// A Chain will search for a provider which returns credentials
21// and cache that provider until Retrieve is called again.
22//
23// The Chain provides a way of chaining multiple providers together
24// which will pick the first available using priority order of the
25// Providers in the list.
26//
27// If none of the Providers retrieve valid credentials Value, Chain's
28// Retrieve() will return the anonymous (no credentials) value.
29//
30// If a Provider is found which returns valid credentials Value, Chain
31// will cache that Provider for all calls to IsExpired(), until Retrieve is
32// called again after IsExpired() is true.
33//
34// creds := credentials.NewChainCredentials(
35// []credentials.Provider{
36// &credentials.EnvAWSS3{},
37// &credentials.EnvMinio{},
38// })
39//
40// // Usage of ChainCredentials with the v7 client.
41// mc, err := minio.New(endpoint, &minio.Options{Creds: creds, Secure: secure})
42// if err != nil {
43// log.Fatalln(err)
44// }
45type Chain struct {
46 Providers []Provider
47 curr Provider
48}
49
50// NewChainCredentials returns a pointer to a new Credentials object
51// wrapping a chain of providers.
52func NewChainCredentials(providers []Provider) *Credentials {
53 return New(&Chain{
54 Providers: append([]Provider{}, providers...),
55 })
56}
57
58// Retrieve returns the credentials value, returns no credentials(anonymous)
59// if no credentials provider returned any value.
60//
61// If a provider is found with credentials, it will be cached and any calls
62// to IsExpired() will return the expired state of the cached provider.
63func (c *Chain) Retrieve() (Value, error) {
64 for _, p := range c.Providers {
65 creds, _ := p.Retrieve()
66 // Always prioritize non-anonymous providers, if any.
67 if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
68 continue
69 }
70 c.curr = p
71 return creds, nil
72 }
73 // At this point we have exhausted all the providers and are
74 // left without any credentials; return anonymous credentials.
75 return Value{
76 SignerType: SignatureAnonymous,
77 }, nil
78}
79
80// IsExpired will return the expired state of the currently cached provider
81// if there is one. If there is no current provider, true will be returned.
82func (c *Chain) IsExpired() bool {
83 if c.curr != nil {
84 return c.curr.IsExpired()
85 }
86
87 return true
88}
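
A short sketch chaining the EnvAWS and EnvMinio providers introduced later in this commit; play.min.io is a placeholder endpoint, and anonymous access is the fallback when neither set of variables is present (imports assumed: log, minio-go/v7, and its credentials package).

creds := credentials.NewChainCredentials([]credentials.Provider{
	&credentials.EnvAWS{},   // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
	&credentials.EnvMinio{}, // MINIO_ROOT_USER / MINIO_ROOT_PASSWORD
})
client, err := minio.New("play.min.io", &minio.Options{Creds: creds, Secure: true})
if err != nil {
	log.Fatalln(err)
}
_ = client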
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 0000000..d793c9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
1{
2 "version": "8",
3 "hosts": {
4 "play": {
5 "url": "https://play.min.io",
6 "accessKey": "Q3AM3UQ867SPQQA43P2F",
7 "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
8 "api": "S3v2"
9 },
10 "s3": {
11 "url": "https://s3.amazonaws.com",
12 "accessKey": "accessKey",
13 "secretKey": "secret",
14 "api": "S3v4"
15 }
16 }
17} \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 0000000..af61049
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,193 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "sync"
22 "time"
23)
24
25const (
26 // STSVersion sts version string
27 STSVersion = "2011-06-15"
28
29 // Fraction of the credential lifetime after which IsExpired reports true (see SetExpiration)
30 defaultExpiryWindow = 0.8
31)
32
33// A Value is the AWS credentials value for individual credential fields.
34type Value struct {
35 // AWS Access key ID
36 AccessKeyID string
37
38 // AWS Secret Access Key
39 SecretAccessKey string
40
41 // AWS Session Token
42 SessionToken string
43
44 // Signature Type.
45 SignerType SignatureType
46}
47
48// A Provider is the interface for any component which will provide credentials
49// Value. A provider is required to manage its own Expired state, and what
50// being expired means.
51type Provider interface {
52 // Retrieve returns nil if it successfully retrieved the value.
53 // Error is returned if the value was not obtainable or is empty.
54 Retrieve() (Value, error)
55
56 // IsExpired returns if the credentials are no longer valid, and need
57 // to be retrieved.
58 IsExpired() bool
59}
60
61// An Expiry provides shared expiration logic to be used by credentials
62// providers to implement expiry functionality.
63//
64// The best method to use this struct is as an anonymous field within the
65// provider's struct.
66//
67// Example:
68//
69// type IAMCredentialProvider struct {
70// Expiry
71// ...
72// }
73type Expiry struct {
74 // The date/time when to expire on
75 expiration time.Time
76
77 // If set will be used by IsExpired to determine the current time.
78 // Defaults to time.Now if CurrentTime is not set.
79 CurrentTime func() time.Time
80}
81
82// SetExpiration sets the expiration IsExpired will check when called.
83//
84// If window is greater than 0 the expiration time will be reduced by the
85// window value.
86//
87// Using a window is helpful to trigger credentials to expire sooner than
88// the expiration time given to ensure no requests are made with expired
89// tokens.
90func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
91 if e.CurrentTime == nil {
92 e.CurrentTime = time.Now
93 }
94 cut := window
95 if cut < 0 {
96 expireIn := expiration.Sub(e.CurrentTime())
97 cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
98 }
99 e.expiration = expiration.Add(-cut)
100}
101
102// IsExpired returns if the credentials are expired.
103func (e *Expiry) IsExpired() bool {
104 if e.CurrentTime == nil {
105 e.CurrentTime = time.Now
106 }
107 return e.expiration.Before(e.CurrentTime())
108}
109
110// Credentials - A container for concurrency-safe retrieval of credentials Value.
111// Credentials will cache the credentials value until they expire. Once the value
112// expires the next Get will attempt to retrieve valid credentials.
113//
114// Credentials is safe to use across multiple goroutines and will manage the
115// synchronous state so the Providers do not need to implement their own
116// synchronization.
117//
118// The first Credentials.Get() will always call Provider.Retrieve() to get the
119// first instance of the credentials Value. All calls to Get() after that
120// will return the cached credentials Value until IsExpired() returns true.
121type Credentials struct {
122 sync.Mutex
123
124 creds Value
125 forceRefresh bool
126 provider Provider
127}
128
129// New returns a pointer to a new Credentials with the provider set.
130func New(provider Provider) *Credentials {
131 return &Credentials{
132 provider: provider,
133 forceRefresh: true,
134 }
135}
136
137// Get returns the credentials value, or error if the credentials Value failed
138// to be retrieved.
139//
140// Will return the cached credentials Value if it has not expired. If the
141// credentials Value has expired the Provider's Retrieve() will be called
142// to refresh the credentials.
143//
144// If Credentials.Expire() was called the credentials Value will be force
145// expired, and the next call to Get() will cause them to be refreshed.
146func (c *Credentials) Get() (Value, error) {
147 if c == nil {
148 return Value{}, nil
149 }
150
151 c.Lock()
152 defer c.Unlock()
153
154 if c.isExpired() {
155 creds, err := c.provider.Retrieve()
156 if err != nil {
157 return Value{}, err
158 }
159 c.creds = creds
160 c.forceRefresh = false
161 }
162
163 return c.creds, nil
164}
165
166// Expire expires the credentials and forces them to be retrieved on the
167// next call to Get().
168//
169// This will override the Provider's expired state, and force Credentials
170// to call the Provider's Retrieve().
171func (c *Credentials) Expire() {
172 c.Lock()
173 defer c.Unlock()
174
175 c.forceRefresh = true
176}
177
178// IsExpired returns if the credentials are no longer valid, and need
179// to be refreshed.
180//
181// If the Credentials were forced to be expired with Expire() this will
182// reflect that override.
183func (c *Credentials) IsExpired() bool {
184 c.Lock()
185 defer c.Unlock()
186
187 return c.isExpired()
188}
189
190// isExpired helper method wrapping the definition of expired credentials.
191func (c *Credentials) isExpired() bool {
192 return c.forceRefresh || c.provider.IsExpired()
193}
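
A brief sketch of a custom Provider embedding Expiry, with a hypothetical fetchToken stub standing in for any real token source; it shows how SetExpiration cooperates with the inherited IsExpired and with Credentials.Get (imports assumed: time and this credentials package).

// fetchToken is a hypothetical stand-in for any source of temporary keys.
func fetchToken() (ak, sk, token string, expiry time.Time) {
	return "AK", "SK", "TOKEN", time.Now().Add(1 * time.Hour)
}

// tokenProvider embeds Expiry to inherit IsExpired and SetExpiration.
type tokenProvider struct {
	credentials.Expiry
}

func (t *tokenProvider) Retrieve() (credentials.Value, error) {
	ak, sk, token, expiry := fetchToken()
	// Expire ten minutes early so no request is signed with a stale token.
	t.SetExpiration(expiry, 10*time.Minute)
	return credentials.Value{
		AccessKeyID:     ak,
		SecretAccessKey: sk,
		SessionToken:    token,
		SignerType:      credentials.SignatureV4,
	}, nil
}

// creds := credentials.New(&tokenProvider{})
// v, err := creds.Get() // Retrieve runs again only once the window elapses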
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
new file mode 100644
index 0000000..afbfad5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
@@ -0,0 +1,7 @@
1{
2 "Version": 1,
3 "SessionToken": "token",
4 "AccessKeyId": "accessKey",
5 "SecretAccessKey": "secret",
6 "Expiration": "9999-04-27T16:02:25.000Z"
7}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
new file mode 100644
index 0000000..e2dc1bf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
@@ -0,0 +1,15 @@
1[default]
2aws_access_key_id = accessKey
3aws_secret_access_key = secret
4aws_session_token = token
5
6[no_token]
7aws_access_key_id = accessKey
8aws_secret_access_key = secret
9
10[with_colon]
11aws_access_key_id: accessKey
12aws_secret_access_key: secret
13
14[with_process]
15credential_process = /bin/cat credentials.json
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
new file mode 100644
index 0000000..fbfb105
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
@@ -0,0 +1,60 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package credentials provides credential retrieval and management
19// for S3 compatible object storage.
20//
21// By default the Credentials.Get() will cache the successful result of a
22// Provider's Retrieve() until Provider.IsExpired() returns true. At which
23// point Credentials will call Provider's Retrieve() to get new credential Value.
24//
25// The Provider is responsible for determining when credentials have expired.
26// It is also important to note that Credentials will always call Retrieve the
27// first time Credentials.Get() is called.
28//
29// Example of using the environment variable credentials.
30//
31// creds := NewEnvAWS()
32// // Retrieve the credentials value
33// credValue, err := creds.Get()
34// if err != nil {
35// // handle error
36// }
37//
38// Example of forcing credentials to expire and be refreshed on the next Get().
39// This may be helpful to proactively expire credentials and refresh them sooner
40// than they would naturally expire on their own.
41//
42// creds := NewFromIAM("")
43// creds.Expire()
44// credsValue, err := creds.Get()
45// // New credentials will be retrieved instead of from cache.
46//
47// # Custom Provider
48//
49// Each Provider built into this package also provides a helper method to generate
50// a Credentials pointer setup with the provider. To use a custom Provider just
51// create a type which satisfies the Provider interface and pass it to the
52// NewCredentials method.
53//
54// type MyProvider struct{}
55// func (m *MyProvider) Retrieve() (Value, error) {...}
56// func (m *MyProvider) IsExpired() bool {...}
57//
58// creds := New(&MyProvider{})
59// credValue, err := creds.Get()
60package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 0000000..b6e60d0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
22// An EnvAWS retrieves credentials from the environment variables of the
23// running process. Environment credentials never expire.
24//
25// Environment variables used:
26//
27// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
28// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
29// * Secret Token: AWS_SESSION_TOKEN.
30type EnvAWS struct {
31 retrieved bool
32}
33
34// NewEnvAWS returns a pointer to a new Credentials object
35// wrapping the environment variable provider.
36func NewEnvAWS() *Credentials {
37 return New(&EnvAWS{})
38}
39
40// Retrieve retrieves the keys from the environment.
41func (e *EnvAWS) Retrieve() (Value, error) {
42 e.retrieved = false
43
44 id := os.Getenv("AWS_ACCESS_KEY_ID")
45 if id == "" {
46 id = os.Getenv("AWS_ACCESS_KEY")
47 }
48
49 secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
50 if secret == "" {
51 secret = os.Getenv("AWS_SECRET_KEY")
52 }
53
54 signerType := SignatureV4
55 if id == "" || secret == "" {
56 signerType = SignatureAnonymous
57 }
58
59 e.retrieved = true
60 return Value{
61 AccessKeyID: id,
62 SecretAccessKey: secret,
63 SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
64 SignerType: signerType,
65 }, nil
66}
67
68// IsExpired returns if the credentials have been retrieved.
69func (e *EnvAWS) IsExpired() bool {
70 return !e.retrieved
71}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 0000000..5bfeab1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
22// An EnvMinio retrieves credentials from the environment variables of the
23// running process. Environment credentials never expire.
24//
25// Environment variables used:
26//
27// * Access Key ID: MINIO_ACCESS_KEY.
28// * Secret Access Key: MINIO_SECRET_KEY.
29// * Access Key ID: MINIO_ROOT_USER.
30// * Secret Access Key: MINIO_ROOT_PASSWORD.
31type EnvMinio struct {
32 retrieved bool
33}
34
35// NewEnvMinio returns a pointer to a new Credentials object
36// wrapping the environment variable provider.
37func NewEnvMinio() *Credentials {
38 return New(&EnvMinio{})
39}
40
41// Retrieve retrieves the keys from the environment.
42func (e *EnvMinio) Retrieve() (Value, error) {
43 e.retrieved = false
44
45 id := os.Getenv("MINIO_ROOT_USER")
46 secret := os.Getenv("MINIO_ROOT_PASSWORD")
47
48 signerType := SignatureV4
49 if id == "" || secret == "" {
50 id = os.Getenv("MINIO_ACCESS_KEY")
51 secret = os.Getenv("MINIO_SECRET_KEY")
52 if id == "" || secret == "" {
53 signerType = SignatureAnonymous
54 }
55 }
56
57 e.retrieved = true
58 return Value{
59 AccessKeyID: id,
60 SecretAccessKey: secret,
61 SignerType: signerType,
62 }, nil
63}
64
65// IsExpired returns if the credentials have been retrieved.
66func (e *EnvMinio) IsExpired() bool {
67 return !e.retrieved
68}
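
The simplest use of this provider, assuming MINIO_ROOT_USER and MINIO_ROOT_PASSWORD are exported, with localhost:9000 as a placeholder endpoint (same imports as the sketches above).

creds := credentials.NewEnvMinio()
client, err := minio.New("localhost:9000", &minio.Options{Creds: creds, Secure: false})
if err != nil {
	log.Fatalln(err)
}
_ = client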
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 0000000..07a9c2f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25)
26
27// ErrorResponse - Is the typed error returned.
28// ErrorResponse struct should be comparable since it is compared inside
29// golang http API (https://github.com/golang/go/issues/29768)
30type ErrorResponse struct {
31 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
32 STSError struct {
33 Type string `xml:"Type"`
34 Code string `xml:"Code"`
35 Message string `xml:"Message"`
36 } `xml:"Error"`
37 RequestID string `xml:"RequestId"`
38}
39
40// Error - Is the typed error returned by all API operations.
41type Error struct {
42 XMLName xml.Name `xml:"Error" json:"-"`
43 Code string
44 Message string
45 BucketName string
46 Key string
47 Resource string
48 RequestID string `xml:"RequestId"`
49 HostID string `xml:"HostId"`
50
51 // Region where the bucket is located. This header is returned
52 // only in HEAD bucket and ListObjects response.
53 Region string
54
55 // Captures the server string returned in response header.
56 Server string
57
58 // Underlying HTTP status code for the returned error
59 StatusCode int `xml:"-" json:"-"`
60}
61
62// Error - Returns S3 error string.
63func (e Error) Error() string {
64 if e.Message == "" {
65 return fmt.Sprintf("Error response code %s.", e.Code)
66 }
67 return e.Message
68}
69
70// Error - Returns STS error string.
71func (e ErrorResponse) Error() string {
72 if e.STSError.Message == "" {
73 return fmt.Sprintf("Error response code %s.", e.STSError.Code)
74 }
75 return e.STSError.Message
76}
77
78// xmlDecoder decodes XML content from body into v.
79func xmlDecoder(body io.Reader, v interface{}) error {
80 d := xml.NewDecoder(body)
81 return d.Decode(v)
82}
83
84// xmlDecodeAndBody reads the whole body up to 1MB and
85// tries to XML decode it into v.
86// The body that was read and any error from reading or decoding is returned.
87func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
88 // read the whole body (up to 1MB)
89 const maxBodyLength = 1 << 20
90 body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
91 if err != nil {
92 return nil, err
93 }
94 return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
95}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000..5b07376
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,157 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/json"
22 "errors"
23 "os"
24 "os/exec"
25 "path/filepath"
26 "strings"
27 "time"
28
29 ini "gopkg.in/ini.v1"
30)
31
32// An externalProcessCredentials stores the output of a credential_process
33type externalProcessCredentials struct {
34 Version int
35 SessionToken string
36 AccessKeyID string `json:"AccessKeyId"`
37 SecretAccessKey string
38 Expiration time.Time
39}
40
41// A FileAWSCredentials retrieves credentials from the current user's home
42// directory, and keeps track if those credentials are expired.
43//
44// Profile ini file example: $HOME/.aws/credentials
45type FileAWSCredentials struct {
46 Expiry
47
48 // Path to the shared credentials file.
49 //
50 // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
51 // env value is empty will default to current user's home directory.
52 // Linux/OSX: "$HOME/.aws/credentials"
53 // Windows: "%USERPROFILE%\.aws\credentials"
54 Filename string
55
56 // AWS Profile to extract credentials from the shared credentials file. If empty
57 // will default to environment variable "AWS_PROFILE" or "default" if
58 // environment variable is also not set.
59 Profile string
60
61 // retrieved states if the credentials have been successfully retrieved.
62 retrieved bool
63}
64
65// NewFileAWSCredentials returns a pointer to a new Credentials object
66// wrapping the Profile file provider.
67func NewFileAWSCredentials(filename, profile string) *Credentials {
68 return New(&FileAWSCredentials{
69 Filename: filename,
70 Profile: profile,
71 })
72}
73
74// Retrieve reads and extracts the shared credentials from the current
75// user's home directory.
76func (p *FileAWSCredentials) Retrieve() (Value, error) {
77 if p.Filename == "" {
78 p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
79 if p.Filename == "" {
80 homeDir, err := os.UserHomeDir()
81 if err != nil {
82 return Value{}, err
83 }
84 p.Filename = filepath.Join(homeDir, ".aws", "credentials")
85 }
86 }
87 if p.Profile == "" {
88 p.Profile = os.Getenv("AWS_PROFILE")
89 if p.Profile == "" {
90 p.Profile = "default"
91 }
92 }
93
94 p.retrieved = false
95
96 iniProfile, err := loadProfile(p.Filename, p.Profile)
97 if err != nil {
98 return Value{}, err
99 }
100
101 // Default to empty string if not found.
102 id := iniProfile.Key("aws_access_key_id")
103 // Default to empty string if not found.
104 secret := iniProfile.Key("aws_secret_access_key")
105 // Default to empty string if not found.
106 token := iniProfile.Key("aws_session_token")
107
108 // If credential_process is defined, obtain credentials by executing
109 // the external process
110 credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
111 if credentialProcess != "" {
112 args := strings.Fields(credentialProcess)
113 if len(args) <= 1 {
114 return Value{}, errors.New("invalid credential process args")
115 }
116 cmd := exec.Command(args[0], args[1:]...)
117 out, err := cmd.Output()
118 if err != nil {
119 return Value{}, err
120 }
121 var externalProcessCredentials externalProcessCredentials
122 err = json.Unmarshal([]byte(out), &externalProcessCredentials)
123 if err != nil {
124 return Value{}, err
125 }
126 p.retrieved = true
127 p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
128 return Value{
129 AccessKeyID: externalProcessCredentials.AccessKeyID,
130 SecretAccessKey: externalProcessCredentials.SecretAccessKey,
131 SessionToken: externalProcessCredentials.SessionToken,
132 SignerType: SignatureV4,
133 }, nil
134 }
135 p.retrieved = true
136 return Value{
137 AccessKeyID: id.String(),
138 SecretAccessKey: secret.String(),
139 SessionToken: token.String(),
140 SignerType: SignatureV4,
141 }, nil
142}
143
144// loadProfile loads from the file pointed to by the shared credentials filename for profile.
145// The credentials retrieved from the profile will be returned, or an error if
146// reading the file fails or the data is invalid.
147func loadProfile(filename, profile string) (*ini.Section, error) {
148 config, err := ini.Load(filename)
149 if err != nil {
150 return nil, err
151 }
152 iniProfile, err := config.GetSection(profile)
153 if err != nil {
154 return nil, err
155 }
156 return iniProfile, nil
157}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 0000000..eb77767
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,139 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "os"
22 "path/filepath"
23 "runtime"
24
25 jsoniter "github.com/json-iterator/go"
26)
27
28// A FileMinioClient retrieves credentials from the current user's home
29// directory, and keeps track if those credentials are expired.
30//
31// Configuration file example: $HOME/.mc/config.json
32type FileMinioClient struct {
33 // Path to the shared credentials file.
34 //
35 // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
36 // env value is empty will default to current user's home directory.
37 // Linux/OSX: "$HOME/.mc/config.json"
38 // Windows: "%USERPROFILE%\mc\config.json"
39 Filename string
40
41 // MinIO Alias to extract credentials from the shared credentials file. If empty
42 // will default to environment variable "MINIO_ALIAS" or "s3" if
43 // environment variable is also not set.
44 Alias string
45
46 // retrieved states if the credentials have been successfully retrieved.
47 retrieved bool
48}
49
50// NewFileMinioClient returns a pointer to a new Credentials object
51// wrapping the Alias file provider.
52func NewFileMinioClient(filename, alias string) *Credentials {
53 return New(&FileMinioClient{
54 Filename: filename,
55 Alias: alias,
56 })
57}
58
59// Retrieve reads and extracts the shared credentials from the current
60// user's home directory.
61func (p *FileMinioClient) Retrieve() (Value, error) {
62 if p.Filename == "" {
63 if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
64 p.Filename = value
65 } else {
66 homeDir, err := os.UserHomeDir()
67 if err != nil {
68 return Value{}, err
69 }
70 p.Filename = filepath.Join(homeDir, ".mc", "config.json")
71 if runtime.GOOS == "windows" {
72 p.Filename = filepath.Join(homeDir, "mc", "config.json")
73 }
74 }
75 }
76
77 if p.Alias == "" {
78 p.Alias = os.Getenv("MINIO_ALIAS")
79 if p.Alias == "" {
80 p.Alias = "s3"
81 }
82 }
83
84 p.retrieved = false
85
86 hostCfg, err := loadAlias(p.Filename, p.Alias)
87 if err != nil {
88 return Value{}, err
89 }
90
91 p.retrieved = true
92 return Value{
93 AccessKeyID: hostCfg.AccessKey,
94 SecretAccessKey: hostCfg.SecretKey,
95 SignerType: parseSignatureType(hostCfg.API),
96 }, nil
97}
98
99// IsExpired returns if the shared credentials have expired.
100func (p *FileMinioClient) IsExpired() bool {
101 return !p.retrieved
102}
103
104// hostConfig is the configuration of a single host.
105type hostConfig struct {
106 URL string `json:"url"`
107 AccessKey string `json:"accessKey"`
108 SecretKey string `json:"secretKey"`
109 API string `json:"api"`
110}
111
112// config holds the config file version and its host entries.
113type config struct {
114 Version string `json:"version"`
115 Hosts map[string]hostConfig `json:"hosts"`
116 Aliases map[string]hostConfig `json:"aliases"`
117}
118
119// loadAlias loads from the file pointed to by the shared credentials filename for alias.
120// The credentials retrieved from the alias will be returned, or an error if
121// reading the file fails.
122func loadAlias(filename, alias string) (hostConfig, error) {
123 cfg := &config{}
124 json := jsoniter.ConfigCompatibleWithStandardLibrary
125
126 configBytes, err := os.ReadFile(filename)
127 if err != nil {
128 return hostConfig{}, err
129 }
130 if err = json.Unmarshal(configBytes, cfg); err != nil {
131 return hostConfig{}, err
132 }
133
134 if cfg.Version == "10" {
135 return cfg.Aliases[alias], nil
136 }
137
138 return cfg.Hosts[alias], nil
139}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 0000000..c5153c4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,433 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bufio"
22 "context"
23 "errors"
24 "fmt"
25 "io"
26 "net"
27 "net/http"
28 "net/url"
29 "os"
30 "path"
31 "strings"
32 "time"
33
34 jsoniter "github.com/json-iterator/go"
35)
36
37// DefaultExpiryWindow - Default expiry window.
38// ExpiryWindow will allow the credentials to trigger refreshing
39// prior to the credentials actually expiring. This is beneficial
40// so race conditions with expiring credentials do not cause
41// requests to fail unexpectedly due to ExpiredTokenException exceptions.
42// DefaultExpiryWindow can be used as a parameter to (*Expiry).SetExpiration.
43// When used, the token refresh will be triggered once 80% of the time
44// until the actual expiration has elapsed.
45const DefaultExpiryWindow = -1
46
47// An IAM retrieves credentials from the EC2 service, and keeps track if
48// those credentials are expired.
49type IAM struct {
50 Expiry
51
52 // Required http Client to use when connecting to IAM metadata service.
53 Client *http.Client
54
55 // Custom endpoint to fetch IAM role credentials.
56 Endpoint string
57
58 // Region configurable custom region for STS
59 Region string
60
61 // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
62 Container struct {
63 AuthorizationToken string
64 CredentialsFullURI string
65 CredentialsRelativeURI string
66 }
67
68 // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
69 EKSIdentity struct {
70 TokenFile string
71 RoleARN string
72 RoleSessionName string
73 }
74}
75
76// IAM Roles for Amazon EC2
77// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
78const (
79 DefaultIAMRoleEndpoint = "http://169.254.169.254"
80 DefaultECSRoleEndpoint = "http://169.254.170.2"
81 DefaultSTSRoleEndpoint = "https://sts.amazonaws.com"
82 DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
83 TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
84 TokenPath = "/latest/api/token"
85 TokenTTL = "21600"
86 TokenRequestHeader = "X-aws-ec2-metadata-token"
87)
88
89// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
90func NewIAM(endpoint string) *Credentials {
91 return New(&IAM{
92 Client: &http.Client{
93 Transport: http.DefaultTransport,
94 },
95 Endpoint: endpoint,
96 })
97}
98
99// Retrieve retrieves credentials from the EC2 service.
100// An error will be returned if the request fails, or if it is unable to
101// extract the desired credentials.
102func (m *IAM) Retrieve() (Value, error) {
103 token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
104 if token == "" {
105 token = m.Container.AuthorizationToken
106 }
107
108 relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
109 if relativeURI == "" {
110 relativeURI = m.Container.CredentialsRelativeURI
111 }
112
113 fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
114 if fullURI == "" {
115 fullURI = m.Container.CredentialsFullURI
116 }
117
118 identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
119 if identityFile == "" {
120 identityFile = m.EKSIdentity.TokenFile
121 }
122
123 roleArn := os.Getenv("AWS_ROLE_ARN")
124 if roleArn == "" {
125 roleArn = m.EKSIdentity.RoleARN
126 }
127
128 roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
129 if roleSessionName == "" {
130 roleSessionName = m.EKSIdentity.RoleSessionName
131 }
132
133 region := os.Getenv("AWS_REGION")
134 if region == "" {
135 region = m.Region
136 }
137
138 var roleCreds ec2RoleCredRespBody
139 var err error
140
141 endpoint := m.Endpoint
142 switch {
143 case identityFile != "":
144 if len(endpoint) == 0 {
145 if region != "" {
146 if strings.HasPrefix(region, "cn-") {
147 endpoint = "https://sts." + region + ".amazonaws.com.cn"
148 } else {
149 endpoint = "https://sts." + region + ".amazonaws.com"
150 }
151 } else {
152 endpoint = DefaultSTSRoleEndpoint
153 }
154 }
155
156 creds := &STSWebIdentity{
157 Client: m.Client,
158 STSEndpoint: endpoint,
159 GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
160 token, err := os.ReadFile(identityFile)
161 if err != nil {
162 return nil, err
163 }
164
165 return &WebIdentityToken{Token: string(token)}, nil
166 },
167 RoleARN: roleArn,
168 roleSessionName: roleSessionName,
169 }
170
171 stsWebIdentityCreds, err := creds.Retrieve()
172 if err == nil {
173 m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
174 }
175 return stsWebIdentityCreds, err
176
177 case relativeURI != "":
178 if len(endpoint) == 0 {
179 endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
180 }
181
182 roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
183
184 case fullURI != "":
185 if len(endpoint) == 0 {
186 endpoint = fullURI
187 var ok bool
188 if ok, err = isLoopback(endpoint); !ok {
189 if err == nil {
190 err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
191 }
192 break
193 }
194 }
195
196 roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
197
198 default:
199 roleCreds, err = getCredentials(m.Client, endpoint)
200 }
201
202 if err != nil {
203 return Value{}, err
204 }
205 // The expiry window follows DefaultExpiryWindow (refresh at 80% of lifetime).
206 m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
207
208 return Value{
209 AccessKeyID: roleCreds.AccessKeyID,
210 SecretAccessKey: roleCreds.SecretAccessKey,
211 SessionToken: roleCreds.Token,
212 SignerType: SignatureV4,
213 }, nil
214}
215
216// An ec2RoleCredRespBody provides the shape for unmarshaling credential
217// request responses.
218type ec2RoleCredRespBody struct {
219 // Success State
220 Expiration time.Time
221 AccessKeyID string
222 SecretAccessKey string
223 Token string
224
225 // Error state
226 Code string
227 Message string
228
229 // Unused params.
230 LastUpdated time.Time
231 Type string
232}
233
234// Get the final IAM role URL where the request will
235// be sent to fetch the rolling access credentials.
236// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
237func getIAMRoleURL(endpoint string) (*url.URL, error) {
238 u, err := url.Parse(endpoint)
239 if err != nil {
240 return nil, err
241 }
242 u.Path = DefaultIAMSecurityCredsPath
243 return u, nil
244}
245
246// listRoleNames lists the credential role names associated
247// with the current EC2 service. An error is returned if there are no
248// credentials, or if making or receiving the request fails.
249// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
250func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
251 req, err := http.NewRequest(http.MethodGet, u.String(), nil)
252 if err != nil {
253 return nil, err
254 }
255 if token != "" {
256 req.Header.Add(TokenRequestHeader, token)
257 }
258 resp, err := client.Do(req)
259 if err != nil {
260 return nil, err
261 }
262 defer resp.Body.Close()
263 if resp.StatusCode != http.StatusOK {
264 return nil, errors.New(resp.Status)
265 }
266
267 credsList := []string{}
268 s := bufio.NewScanner(resp.Body)
269 for s.Scan() {
270 credsList = append(credsList, s.Text())
271 }
272
273 if err := s.Err(); err != nil {
274 return nil, err
275 }
276
277 return credsList, nil
278}
279
280func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
281 req, err := http.NewRequest(http.MethodGet, endpoint, nil)
282 if err != nil {
283 return ec2RoleCredRespBody{}, err
284 }
285
286 if token != "" {
287 req.Header.Set("Authorization", token)
288 }
289
290 resp, err := client.Do(req)
291 if err != nil {
292 return ec2RoleCredRespBody{}, err
293 }
294 defer resp.Body.Close()
295 if resp.StatusCode != http.StatusOK {
296 return ec2RoleCredRespBody{}, errors.New(resp.Status)
297 }
298
299 respCreds := ec2RoleCredRespBody{}
300 if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
301 return ec2RoleCredRespBody{}, err
302 }
303
304 return respCreds, nil
305}
306
307func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
308 ctx, cancel := context.WithTimeout(context.Background(), time.Second)
309 defer cancel()
310
311 req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
312 if err != nil {
313 return "", err
314 }
315 req.Header.Add(TokenRequestTTLHeader, TokenTTL)
316 resp, err := client.Do(req)
317 if err != nil {
318 return "", err
319 }
320 defer resp.Body.Close()
321 data, err := io.ReadAll(resp.Body)
322 if err != nil {
323 return "", err
324 }
325 if resp.StatusCode != http.StatusOK {
326 return "", errors.New(resp.Status)
327 }
328 return string(data), nil
329}
330
331// getCredentials - obtains the credentials from the IAM role name associated with
332// the current EC2 service.
333//
334// If the credentials cannot be found, or there is an error
335// reading the response, an error will be returned.
336func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
337 if endpoint == "" {
338 endpoint = DefaultIAMRoleEndpoint
339 }
340
341 // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
342 token, err := fetchIMDSToken(client, endpoint)
343 if err != nil {
344 // Return an error only for valid situations: if IMDSv2 is not enabled,
345 // we will not be able to get the token, and in such a situation we have
346 // to rely on IMDSv1 behavior as a fallback; this check ensures that.
347 // Refer https://github.com/minio/minio-go/issues/1866
348 if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
349 return ec2RoleCredRespBody{}, err
350 }
351 }
352
353 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
354 u, err := getIAMRoleURL(endpoint)
355 if err != nil {
356 return ec2RoleCredRespBody{}, err
357 }
358
359 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
360 roleNames, err := listRoleNames(client, u, token)
361 if err != nil {
362 return ec2RoleCredRespBody{}, err
363 }
364
365 if len(roleNames) == 0 {
366 return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
367 }
368
369 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
370 // - An instance profile can contain only one IAM role. This limit cannot be increased.
371 roleName := roleNames[0]
372
373 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
374 // The following command retrieves the security credentials for an
375 // IAM role named `s3access`.
376 //
377 // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
378 //
379 u.Path = path.Join(u.Path, roleName)
380 req, err := http.NewRequest(http.MethodGet, u.String(), nil)
381 if err != nil {
382 return ec2RoleCredRespBody{}, err
383 }
384 if token != "" {
385 req.Header.Add(TokenRequestHeader, token)
386 }
387
388 resp, err := client.Do(req)
389 if err != nil {
390 return ec2RoleCredRespBody{}, err
391 }
392 defer resp.Body.Close()
393 if resp.StatusCode != http.StatusOK {
394 return ec2RoleCredRespBody{}, errors.New(resp.Status)
395 }
396
397 respCreds := ec2RoleCredRespBody{}
398 if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
399 return ec2RoleCredRespBody{}, err
400 }
401
402 if respCreds.Code != "Success" {
403 // If an error code was returned something failed requesting the role.
404 return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
405 }
406
407 return respCreds, nil
408}
409
410// isLoopback reports whether the URI's host resolves only to loopback addresses.
411func isLoopback(uri string) (bool, error) {
412 u, err := url.Parse(uri)
413 if err != nil {
414 return false, err
415 }
416
417 host := u.Hostname()
418 if len(host) == 0 {
419 return false, fmt.Errorf("can't parse host from uri: %s", uri)
420 }
421
422 ips, err := net.LookupHost(host)
423 if err != nil {
424 return false, err
425 }
426 for _, ip := range ips {
427 if !net.ParseIP(ip).IsLoopback() {
428 return false, nil
429 }
430 }
431
432 return true, nil
433}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
new file mode 100644
index 0000000..b794333
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
@@ -0,0 +1,77 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "strings"
21
22// SignatureType is type of Authorization requested for a given HTTP request.
23type SignatureType int
24
25// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
26const (
27 // SignatureDefault is always set to v4.
28 SignatureDefault SignatureType = iota
29 SignatureV4
30 SignatureV2
31 SignatureV4Streaming
32 SignatureAnonymous // Anonymous signature signifies, no signature.
33)
34
35// IsV2 - is signature SignatureV2?
36func (s SignatureType) IsV2() bool {
37 return s == SignatureV2
38}
39
40// IsV4 - is signature SignatureV4?
41func (s SignatureType) IsV4() bool {
42 return s == SignatureV4 || s == SignatureDefault
43}
44
45// IsStreamingV4 - is signature SignatureV4Streaming?
46func (s SignatureType) IsStreamingV4() bool {
47 return s == SignatureV4Streaming
48}
49
50// IsAnonymous - is signature empty?
51func (s SignatureType) IsAnonymous() bool {
52 return s == SignatureAnonymous
53}
54
55// String returns a humanized version of the signature type;
56// the strings returned here are case insensitive.
57func (s SignatureType) String() string {
58 if s.IsV2() {
59 return "S3v2"
60 } else if s.IsV4() {
61 return "S3v4"
62 } else if s.IsStreamingV4() {
63 return "S3v4Streaming"
64 }
65 return "Anonymous"
66}
67
68func parseSignatureType(str string) SignatureType {
69 if strings.EqualFold(str, "S3v4") {
70 return SignatureV4
71 } else if strings.EqualFold(str, "S3v2") {
72 return SignatureV2
73 } else if strings.EqualFold(str, "S3v4Streaming") {
74 return SignatureV4Streaming
75 }
76 return SignatureAnonymous
77}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 0000000..7dde00b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20// A Static is a set of credentials which are set programmatically,
21// and will never expire.
22type Static struct {
23 Value
24}
25
26// NewStaticV2 returns a pointer to a new Credentials object
27// wrapping a static credentials value provider, with the signature
28// set to v2. If access and secret are not specified then, regardless
29// of the signature type set, Retrieve will return anonymous
30// credentials.
31func NewStaticV2(id, secret, token string) *Credentials {
32 return NewStatic(id, secret, token, SignatureV2)
33}
34
35// NewStaticV4 is similar to NewStaticV2, except the signature is set to v4.
36func NewStaticV4(id, secret, token string) *Credentials {
37 return NewStatic(id, secret, token, SignatureV4)
38}
39
40// NewStatic returns a pointer to a new Credentials object
41// wrapping a static credentials value provider.
42func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
43 return New(&Static{
44 Value: Value{
45 AccessKeyID: id,
46 SecretAccessKey: secret,
47 SessionToken: token,
48 SignerType: signerType,
49 },
50 })
51}
52
53// Retrieve returns the static credentials.
54func (s *Static) Retrieve() (Value, error) {
55 if s.AccessKeyID == "" || s.SecretAccessKey == "" {
56 // Anonymous is not an error
57 return Value{SignerType: SignatureAnonymous}, nil
58 }
59 return s.Value, nil
60}
61
62// IsExpired returns if the credentials are expired.
63//
64// For Static, the credentials never expire.
65func (s *Static) IsExpired() bool {
66 return false
67}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 0000000..9e92c1e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,182 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strings"
29 "time"
30)
31
32// AssumedRoleUser - The identifiers for the temporary security credentials that
33// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
34type AssumedRoleUser struct {
35 Arn string
36 AssumedRoleID string `xml:"AssumeRoleId"`
37}
38
39// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
40type AssumeRoleWithClientGrantsResponse struct {
41 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
42 Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
43 ResponseMetadata struct {
44 RequestID string `xml:"RequestId,omitempty"`
45 } `xml:"ResponseMetadata,omitempty"`
46}
47
48// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
49// request, including temporary credentials that can be used to make MinIO API requests.
50type ClientGrantsResult struct {
51 AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
52 Audience string `xml:",omitempty"`
53 Credentials struct {
54 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
55 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
56 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
57 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
58 } `xml:",omitempty"`
59 PackedPolicySize int `xml:",omitempty"`
60 Provider string `xml:",omitempty"`
61 SubjectFromClientGrantsToken string `xml:",omitempty"`
62}
63
64// ClientGrantsToken - client grants token with expiry.
65type ClientGrantsToken struct {
66 Token string
67 Expiry int
68}
69
70// An STSClientGrants retrieves credentials from the MinIO service, and keeps track if
71// those credentials are expired.
72type STSClientGrants struct {
73 Expiry
74
75 // Required http Client to use when connecting to MinIO STS service.
76 Client *http.Client
77
78 // MinIO endpoint to fetch STS credentials.
79 STSEndpoint string
80
81 // GetClientGrantsTokenExpiry is a function to retrieve tokens
82 // from the IDP. This function should return two values: one is the
83 // accessToken, which is a self-contained access token (JWT),
84 // and the second return value is the expiry associated with
85 // this token. This is a customer-provided function and
86 // is mandatory.
87 GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
88}
89
90// NewSTSClientGrants returns a pointer to a new
91// Credentials object wrapping the STSClientGrants.
92func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
93 if stsEndpoint == "" {
94 return nil, errors.New("STS endpoint cannot be empty")
95 }
96 if getClientGrantsTokenExpiry == nil {
97 return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
98 }
99 return New(&STSClientGrants{
100 Client: &http.Client{
101 Transport: http.DefaultTransport,
102 },
103 STSEndpoint: stsEndpoint,
104 GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
105 }), nil
106}
107
108func getClientGrantsCredentials(clnt *http.Client, endpoint string,
109 getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
110) (AssumeRoleWithClientGrantsResponse, error) {
111 accessToken, err := getClientGrantsTokenExpiry()
112 if err != nil {
113 return AssumeRoleWithClientGrantsResponse{}, err
114 }
115
116 v := url.Values{}
117 v.Set("Action", "AssumeRoleWithClientGrants")
118 v.Set("Token", accessToken.Token)
119 v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
120 v.Set("Version", STSVersion)
121
122 u, err := url.Parse(endpoint)
123 if err != nil {
124 return AssumeRoleWithClientGrantsResponse{}, err
125 }
126
127 req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
128 if err != nil {
129 return AssumeRoleWithClientGrantsResponse{}, err
130 }
131
132 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
133
134 resp, err := clnt.Do(req)
135 if err != nil {
136 return AssumeRoleWithClientGrantsResponse{}, err
137 }
138 defer resp.Body.Close()
139 if resp.StatusCode != http.StatusOK {
140 var errResp ErrorResponse
141 buf, err := io.ReadAll(resp.Body)
142 if err != nil {
143 return AssumeRoleWithClientGrantsResponse{}, err
144 }
145 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
146 if err != nil {
147 var s3Err Error
148 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
149 return AssumeRoleWithClientGrantsResponse{}, err
150 }
151 errResp.RequestID = s3Err.RequestID
152 errResp.STSError.Code = s3Err.Code
153 errResp.STSError.Message = s3Err.Message
154 }
155 return AssumeRoleWithClientGrantsResponse{}, errResp
156 }
157
158 a := AssumeRoleWithClientGrantsResponse{}
159 if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
160 return AssumeRoleWithClientGrantsResponse{}, err
161 }
162 return a, nil
163}
164
165// Retrieve retrieves credentials from the MinIO service.
166// An error will be returned if the request fails.
167func (m *STSClientGrants) Retrieve() (Value, error) {
168 a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
169 if err != nil {
170 return Value{}, err
171 }
172
173 // The expiry window follows DefaultExpiryWindow (refresh at 80% of lifetime).
174 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
175
176 return Value{
177 AccessKeyID: a.Result.Credentials.AccessKey,
178 SecretAccessKey: a.Result.Credentials.SecretKey,
179 SessionToken: a.Result.Credentials.SessionToken,
180 SignerType: SignatureV4,
181 }, nil
182}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
new file mode 100644
index 0000000..e1f9ce4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
@@ -0,0 +1,146 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "net/http"
25 "net/url"
26 "time"
27)
28
29// CustomTokenResult - Contains temporary creds and user metadata.
30type CustomTokenResult struct {
31 Credentials struct {
32 AccessKey string `xml:"AccessKeyId"`
33 SecretKey string `xml:"SecretAccessKey"`
34 Expiration time.Time `xml:"Expiration"`
35 SessionToken string `xml:"SessionToken"`
36 } `xml:",omitempty"`
37
38 AssumedUser string `xml:",omitempty"`
39}
40
41// AssumeRoleWithCustomTokenResponse contains the result of a successful
42// AssumeRoleWithCustomToken request.
43type AssumeRoleWithCustomTokenResponse struct {
44 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"`
45 Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"`
46 Metadata struct {
47 RequestID string `xml:"RequestId,omitempty"`
48 } `xml:"ResponseMetadata,omitempty"`
49}
50
51// CustomTokenIdentity - satisfies the Provider interface, and retrieves
52// credentials from MinIO using the AssumeRoleWithCustomToken STS API.
53type CustomTokenIdentity struct {
54 Expiry
55
56 Client *http.Client
57
58 // MinIO server STS endpoint to fetch STS credentials.
59 STSEndpoint string
60
61 // The custom token to use with the request.
62 Token string
63
64 // RoleArn associated with the identity
65 RoleArn string
66
67 // RequestedExpiry sets the validity of the generated credentials
68 // (this value is bounded by the server).
69 RequestedExpiry time.Duration
70}
71
72// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
73func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
74 u, err := url.Parse(c.STSEndpoint)
75 if err != nil {
76 return value, err
77 }
78
79 v := url.Values{}
80 v.Set("Action", "AssumeRoleWithCustomToken")
81 v.Set("Version", STSVersion)
82 v.Set("RoleArn", c.RoleArn)
83 v.Set("Token", c.Token)
84 if c.RequestedExpiry != 0 {
85 v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
86 }
87
88 u.RawQuery = v.Encode()
89
90 req, err := http.NewRequest(http.MethodPost, u.String(), nil)
91 if err != nil {
92 return value, err
93 }
94
95 resp, err := c.Client.Do(req)
96 if err != nil {
97 return value, err
98 }
99
100 defer resp.Body.Close()
101 if resp.StatusCode != http.StatusOK {
102 return value, errors.New(resp.Status)
103 }
104
105 r := AssumeRoleWithCustomTokenResponse{}
106 if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
107 return
108 }
109
110 cr := r.Result.Credentials
111 c.SetExpiration(cr.Expiration, DefaultExpiryWindow)
112 return Value{
113 AccessKeyID: cr.AccessKey,
114 SecretAccessKey: cr.SecretKey,
115 SessionToken: cr.SessionToken,
116 SignerType: SignatureV4,
117 }, nil
118}
119
120// NewCustomTokenCredentials - returns credentials using the
121// AssumeRoleWithCustomToken STS API.
122func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
123 c := CustomTokenIdentity{
124 Client: &http.Client{Transport: http.DefaultTransport},
125 STSEndpoint: stsEndpoint,
126 Token: token,
127 RoleArn: roleArn,
128 }
129 for _, optFunc := range optFuncs {
130 optFunc(&c)
131 }
132 return New(&c), nil
133}
134
135// CustomTokenOpt is a function type to configure the custom-token based
136// credentials using NewCustomTokenCredentials.
137type CustomTokenOpt func(*CustomTokenIdentity)
138
139// CustomTokenValidityOpt sets the validity duration of the requested
140// credentials. This value is ignored if the server enforces a lower validity
141// period.
142func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt {
143 return func(c *CustomTokenIdentity) {
144 c.RequestedExpiry = d
145 }
146}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
new file mode 100644
index 0000000..ec5f3f0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -0,0 +1,189 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25 "net/http"
26 "net/url"
27 "strings"
28 "time"
29)
30
31// AssumeRoleWithLDAPResponse contains the result of a successful
32// AssumeRoleWithLDAPIdentity request.
33type AssumeRoleWithLDAPResponse struct {
34 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
35 Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
36 ResponseMetadata struct {
37 RequestID string `xml:"RequestId,omitempty"`
38 } `xml:"ResponseMetadata,omitempty"`
39}
40
41// LDAPIdentityResult - contains credentials for a successful
42// AssumeRoleWithLDAPIdentity request.
43type LDAPIdentityResult struct {
44 Credentials struct {
45 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
46 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
47 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
48 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
49 } `xml:",omitempty"`
50
51 SubjectFromToken string `xml:",omitempty"`
52}
53
54// LDAPIdentity retrieves credentials from MinIO
55type LDAPIdentity struct {
56 Expiry
57
58 // Required http Client to use when connecting to MinIO STS service.
59 Client *http.Client
60
61 // Exported STS endpoint to fetch STS credentials.
62 STSEndpoint string
63
64 // LDAP username/password used to fetch LDAP STS credentials.
65 LDAPUsername, LDAPPassword string
66
67 // Session policy to apply to the generated credentials. Leave empty to
68 // use the full access policy available to the user.
69 Policy string
70
71 // RequestedExpiry is the configured expiry duration for credentials
72 // requested from LDAP.
73 RequestedExpiry time.Duration
74}
75
76// NewLDAPIdentity returns a new credentials object that uses LDAP
77// Identity.
78func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
79 l := LDAPIdentity{
80 Client: &http.Client{Transport: http.DefaultTransport},
81 STSEndpoint: stsEndpoint,
82 LDAPUsername: ldapUsername,
83 LDAPPassword: ldapPassword,
84 }
85 for _, optFunc := range optFuncs {
86 optFunc(&l)
87 }
88 return New(&l), nil
89}
90
91// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
92// instance.
93type LDAPIdentityOpt func(*LDAPIdentity)
94
95// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
96func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
97 return func(k *LDAPIdentity) {
98 k.Policy = policy
99 }
100}
101
102// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
103func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
104 return func(k *LDAPIdentity) {
105 k.RequestedExpiry = d
106 }
107}
108
109// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
110// LDAP Identity with a specified session policy. The `policy` parameter must be
111// a JSON string specifying the policy document.
112//
113// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
114func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
115 return New(&LDAPIdentity{
116 Client: &http.Client{Transport: http.DefaultTransport},
117 STSEndpoint: stsEndpoint,
118 LDAPUsername: ldapUsername,
119 LDAPPassword: ldapPassword,
120 Policy: policy,
121 }), nil
122}
123
124// Retrieve gets the credential by calling the MinIO STS API for
125// LDAP on the configured stsEndpoint.
126func (k *LDAPIdentity) Retrieve() (value Value, err error) {
127 u, err := url.Parse(k.STSEndpoint)
128 if err != nil {
129 return value, err
130 }
131
132 v := url.Values{}
133 v.Set("Action", "AssumeRoleWithLDAPIdentity")
134 v.Set("Version", STSVersion)
135 v.Set("LDAPUsername", k.LDAPUsername)
136 v.Set("LDAPPassword", k.LDAPPassword)
137 if k.Policy != "" {
138 v.Set("Policy", k.Policy)
139 }
140 if k.RequestedExpiry != 0 {
141 v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
142 }
143
144 req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
145 if err != nil {
146 return value, err
147 }
148
149 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
150
151 resp, err := k.Client.Do(req)
152 if err != nil {
153 return value, err
154 }
155
156 defer resp.Body.Close()
157 if resp.StatusCode != http.StatusOK {
158 var errResp ErrorResponse
159 buf, err := io.ReadAll(resp.Body)
160 if err != nil {
161 return value, err
162 }
163 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
164 if err != nil {
165 var s3Err Error
166 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
167 return value, err
168 }
169 errResp.RequestID = s3Err.RequestID
170 errResp.STSError.Code = s3Err.Code
171 errResp.STSError.Message = s3Err.Message
172 }
173 return value, errResp
174 }
175
176 r := AssumeRoleWithLDAPResponse{}
177 if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
178 return
179 }
180
181 cr := r.Result.Credentials
182 k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
183 return Value{
184 AccessKeyID: cr.AccessKey,
185 SecretAccessKey: cr.SecretKey,
186 SessionToken: cr.SessionToken,
187 SignerType: SignatureV4,
188 }, nil
189}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 0000000..dee0a8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,211 @@
1// MinIO Go Library for Amazon S3 Compatible Cloud Storage
2// Copyright 2021 MinIO, Inc.
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15
16package credentials
17
18import (
19 "bytes"
20 "crypto/tls"
21 "encoding/xml"
22 "errors"
23 "io"
24 "net"
25 "net/http"
26 "net/url"
27 "strconv"
28 "time"
29)
30
31// CertificateIdentityOption is an optional AssumeRoleWithCertificate
32// parameter - e.g. a custom HTTP transport configuration or S3 credential
33// lifetime.
34type CertificateIdentityOption func(*STSCertificateIdentity)
35
36// CertificateIdentityWithTransport returns a CertificateIdentityOption that
37// customizes the STSCertificateIdentity with the given http.RoundTripper.
38func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
39 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
40}
41
42// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
43// customizes the STSCertificateIdentity with the given lifetime.
44//
45// Fetched S3 credentials will have the given lifetime if the STS server
46// allows such credentials.
47func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
48 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
49}
50
51// An STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
52// rotates those credentials once they expire.
53type STSCertificateIdentity struct {
54 Expiry
55
56 // STSEndpoint is the base URL endpoint of the STS API.
57 // For example, https://minio.local:9000
58 STSEndpoint string
59
60 // S3CredentialLivetime is the duration temp. S3 access
61 // credentials should be valid.
62 //
63 // It represents the access credential lifetime requested
64 // by the client. The STS server may choose to issue
65 // temp. S3 credentials that have a different - usually
66 // shorter - lifetime.
67 //
68 // The default lifetime is one hour.
69 S3CredentialLivetime time.Duration
70
71 // Client is the HTTP client used to authenticate and fetch
72 // S3 credentials.
73 //
74 // A custom TLS client configuration can be specified by
75 // using a custom http.Transport:
76 // Client: http.Client {
77 // Transport: &http.Transport{
78 // TLSClientConfig: &tls.Config{},
79 // },
80 // }
81 Client http.Client
82}
83
84var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
85
86// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
87// to the given STS endpoint with the given TLS certificate and retrieves and
88// rotates S3 credentials.
89func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
90 if endpoint == "" {
91 return nil, errors.New("STS endpoint cannot be empty")
92 }
93 if _, err := url.Parse(endpoint); err != nil {
94 return nil, err
95 }
96 identity := &STSCertificateIdentity{
97 STSEndpoint: endpoint,
98 Client: http.Client{
99 Transport: &http.Transport{
100 Proxy: http.ProxyFromEnvironment,
101 DialContext: (&net.Dialer{
102 Timeout: 30 * time.Second,
103 KeepAlive: 30 * time.Second,
104 }).DialContext,
105 ForceAttemptHTTP2: true,
106 MaxIdleConns: 100,
107 IdleConnTimeout: 90 * time.Second,
108 TLSHandshakeTimeout: 10 * time.Second,
109 ExpectContinueTimeout: 5 * time.Second,
110 TLSClientConfig: &tls.Config{
111 Certificates: []tls.Certificate{certificate},
112 },
113 },
114 },
115 }
116 for _, option := range options {
117 option(identity)
118 }
119 return New(identity), nil
120}
121
122// Retrieve fetches a new set of S3 credentials from the configured
123// STS API endpoint.
124func (i *STSCertificateIdentity) Retrieve() (Value, error) {
125 endpointURL, err := url.Parse(i.STSEndpoint)
126 if err != nil {
127 return Value{}, err
128 }
129 livetime := i.S3CredentialLivetime
130 if livetime == 0 {
131 livetime = 1 * time.Hour
132 }
133
134 queryValues := url.Values{}
135 queryValues.Set("Action", "AssumeRoleWithCertificate")
136 queryValues.Set("Version", STSVersion)
137 queryValues.Set("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
138
139 req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
140 if err != nil {
141 return Value{}, err
142 }
143 // req.Form is only populated by server-side ParseForm; values set on an
144 // outgoing request are never sent, so DurationSeconds is carried in the
145 // query string instead.
146 req.URL.RawQuery = queryValues.Encode()
147
148 resp, err := i.Client.Do(req)
149 if err != nil {
150 return Value{}, err
151 }
152 if resp.Body != nil {
153 defer resp.Body.Close()
154 }
155 if resp.StatusCode != http.StatusOK {
156 var errResp ErrorResponse
157 buf, err := io.ReadAll(resp.Body)
158 if err != nil {
159 return Value{}, err
160 }
161 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
162 if err != nil {
163 var s3Err Error
164 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
165 return Value{}, err
166 }
167 errResp.RequestID = s3Err.RequestID
168 errResp.STSError.Code = s3Err.Code
169 errResp.STSError.Message = s3Err.Message
170 }
171 return Value{}, errResp
172 }
173
174 const MaxSize = 10 * 1 << 20
175 var body io.Reader = resp.Body
176 if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
177 body = io.LimitReader(body, resp.ContentLength)
178 } else {
179 body = io.LimitReader(body, MaxSize)
180 }
181
182 var response assumeRoleWithCertificateResponse
183 if err = xml.NewDecoder(body).Decode(&response); err != nil {
184 return Value{}, err
185 }
186 i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
187 return Value{
188 AccessKeyID: response.Result.Credentials.AccessKey,
189 SecretAccessKey: response.Result.Credentials.SecretKey,
190 SessionToken: response.Result.Credentials.SessionToken,
191 SignerType: SignatureDefault,
192 }, nil
193}
194
195// Expiration returns the expiration time of the current S3 credentials.
196func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
197
198type assumeRoleWithCertificateResponse struct {
199 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
200 Result struct {
201 Credentials struct {
202 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
203 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
204 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
205 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
206 } `xml:"Credentials" json:"credentials,omitempty"`
207 } `xml:"AssumeRoleWithCertificateResult"`
208 ResponseMetadata struct {
209 RequestID string `xml:"RequestId,omitempty"`
210 } `xml:"ResponseMetadata,omitempty"`
211}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
new file mode 100644
index 0000000..2e2af50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,205 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strconv"
29 "strings"
30 "time"
31)
32
33// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request.
34type AssumeRoleWithWebIdentityResponse struct {
35 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
36 Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
37 ResponseMetadata struct {
38 RequestID string `xml:"RequestId,omitempty"`
39 } `xml:"ResponseMetadata,omitempty"`
40}
41
42// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
43// request, including temporary credentials that can be used to make MinIO API requests.
44type WebIdentityResult struct {
45 AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
46 Audience string `xml:",omitempty"`
47 Credentials struct {
48 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
49 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
50 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
51 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
52 } `xml:",omitempty"`
53 PackedPolicySize int `xml:",omitempty"`
54 Provider string `xml:",omitempty"`
55 SubjectFromWebIdentityToken string `xml:",omitempty"`
56}
57
58// WebIdentityToken - web identity token with expiry.
59type WebIdentityToken struct {
60 Token string
61 AccessToken string
62 Expiry int
63}
64
65// An STSWebIdentity retrieves credentials from the MinIO service, and keeps track if
66// those credentials are expired.
67type STSWebIdentity struct {
68 Expiry
69
70 // Required http Client to use when connecting to MinIO STS service.
71 Client *http.Client
72
73 // Exported STS endpoint to fetch STS credentials.
74 STSEndpoint string
75
76 // Exported GetWebIDTokenExpiry function which returns ID
77 // tokens from the IDP. This function should return two values:
78 // one is the ID token, which is a self-contained ID token (JWT),
79 // and the second return value is the expiry associated with
80 // this token.
81 // This is a customer-provided function and is mandatory.
82 GetWebIDTokenExpiry func() (*WebIdentityToken, error)
83
84 // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
85 // assuming.
86 RoleARN string
87
88 // roleSessionName is the identifier for the assumed role session.
89 roleSessionName string
90}
91
92// NewSTSWebIdentity returns a pointer to a new
93// Credentials object wrapping the STSWebIdentity.
94func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
95 if stsEndpoint == "" {
96 return nil, errors.New("STS endpoint cannot be empty")
97 }
98 if getWebIDTokenExpiry == nil {
99 return nil, errors.New("Web ID token and expiry retrieval function should be defined")
100 }
101 return New(&STSWebIdentity{
102 Client: &http.Client{
103 Transport: http.DefaultTransport,
104 },
105 STSEndpoint: stsEndpoint,
106 GetWebIDTokenExpiry: getWebIDTokenExpiry,
107 }), nil
108}
109
110func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
111 getWebIDTokenExpiry func() (*WebIdentityToken, error),
112) (AssumeRoleWithWebIdentityResponse, error) {
113 idToken, err := getWebIDTokenExpiry()
114 if err != nil {
115 return AssumeRoleWithWebIdentityResponse{}, err
116 }
117
118 v := url.Values{}
119 v.Set("Action", "AssumeRoleWithWebIdentity")
120 if len(roleARN) > 0 {
121 v.Set("RoleArn", roleARN)
122
123 if len(roleSessionName) == 0 {
124 roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
125 }
126 v.Set("RoleSessionName", roleSessionName)
127 }
128 v.Set("WebIdentityToken", idToken.Token)
129 if idToken.AccessToken != "" {
130 // Usually set when the server is using an extended userInfo endpoint.
131 v.Set("WebIdentityAccessToken", idToken.AccessToken)
132 }
133 if idToken.Expiry > 0 {
134 v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
135 }
136 v.Set("Version", STSVersion)
137
138 u, err := url.Parse(endpoint)
139 if err != nil {
140 return AssumeRoleWithWebIdentityResponse{}, err
141 }
142
143 req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
144 if err != nil {
145 return AssumeRoleWithWebIdentityResponse{}, err
146 }
147
148 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
149
150 resp, err := clnt.Do(req)
151 if err != nil {
152 return AssumeRoleWithWebIdentityResponse{}, err
153 }
154
155 defer resp.Body.Close()
156 if resp.StatusCode != http.StatusOK {
157 var errResp ErrorResponse
158 buf, err := io.ReadAll(resp.Body)
159 if err != nil {
160 return AssumeRoleWithWebIdentityResponse{}, err
161 }
162 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
163 if err != nil {
164 var s3Err Error
165 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
166 return AssumeRoleWithWebIdentityResponse{}, err
167 }
168 errResp.RequestID = s3Err.RequestID
169 errResp.STSError.Code = s3Err.Code
170 errResp.STSError.Message = s3Err.Message
171 }
172 return AssumeRoleWithWebIdentityResponse{}, errResp
173 }
174
175 a := AssumeRoleWithWebIdentityResponse{}
176 if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
177 return AssumeRoleWithWebIdentityResponse{}, err
178 }
179
180 return a, nil
181}
182
183// Retrieve retrieves credentials from the MinIO service.
184// An error will be returned if the request fails.
185func (m *STSWebIdentity) Retrieve() (Value, error) {
186 a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
187 if err != nil {
188 return Value{}, err
189 }
190
191 // The expiry window follows DefaultExpiryWindow (refresh at 80% of lifetime).
192 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
193
194 return Value{
195 AccessKeyID: a.Result.Credentials.AccessKey,
196 SecretAccessKey: a.Result.Credentials.SecretKey,
197 SessionToken: a.Result.Credentials.SessionToken,
198 SignerType: SignatureV4,
199 }, nil
200}
201
202// Expiration returns the expiration time of the credentials.
203func (m *STSWebIdentity) Expiration() time.Time {
204 return m.expiration
205}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
new file mode 100644
index 0000000..6db26c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -0,0 +1,24 @@
1//go:build !fips
2// +build !fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = false
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
new file mode 100644
index 0000000..6402582
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -0,0 +1,24 @@
1//go:build fips
2// +build fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = true
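
Exactly one of these two files is compiled, depending on whether the 'fips' build tag is set; for example:

    go build -tags fips ./...   # compiles fips_enabled.go, encrypt.FIPS == true
    go build ./...              # compiles fips_disabled.go, encrypt.FIPS == false
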
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
new file mode 100644
index 0000000..a7081c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -0,0 +1,198 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package encrypt
19
20import (
21 "crypto/md5"
22 "encoding/base64"
23 "errors"
24 "net/http"
25
26 jsoniter "github.com/json-iterator/go"
27 "golang.org/x/crypto/argon2"
28)
29
30const (
31 // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
32 SseGenericHeader = "X-Amz-Server-Side-Encryption"
33
34 // SseKmsKeyID is the AWS SSE-KMS key id.
35 SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
36 // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
37 SseEncryptionContext = SseGenericHeader + "-Context"
38
39 // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
40 SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
41 // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
42 SseCustomerKey = SseGenericHeader + "-Customer-Key"
43 // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
44 SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
45
46 // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
47 SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
48 // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
49 SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
50 // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
51 SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
52)
53
54// PBKDF creates a SSE-C key from the provided password and salt.
55// PBKDF is a password-based key derivation function
56// which can be used to derive a high-entropy cryptographic
57// key from a low-entropy password and a salt.
58type PBKDF func(password, salt []byte) ServerSide
59
60// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
61// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
62var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
63 sse := ssec{}
64 copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
65 return sse
66}
67
68// Type is the server-side-encryption method. It represents one of
69// the following encryption methods:
70// - SSE-C: server-side-encryption with customer provided keys
71// - KMS: server-side-encryption with managed keys
72// - S3: server-side-encryption using S3 storage encryption
73type Type string
74
75const (
76 // SSEC represents server-side-encryption with customer provided keys
77 SSEC Type = "SSE-C"
78 // KMS represents server-side-encryption with managed keys
79 KMS Type = "KMS"
80 // S3 represents server-side-encryption using S3 storage encryption
81 S3 Type = "S3"
82)
83
84// ServerSide is a form of S3 server-side-encryption.
85type ServerSide interface {
86 // Type returns the server-side-encryption method.
87 Type() Type
88
89 // Marshal adds encryption headers to the provided HTTP headers.
90 // It marks an HTTP request as server-side-encryption request
91 // and inserts the required data into the headers.
92 Marshal(h http.Header)
93}
94
95// NewSSE returns a server-side-encryption using S3 storage encryption.
96// Using SSE-S3 the server will encrypt the object with server-managed keys.
97func NewSSE() ServerSide { return s3{} }
98
99// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
100func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
101 if context == nil {
102 return kms{key: keyID, hasContext: false}, nil
103 }
104 json := jsoniter.ConfigCompatibleWithStandardLibrary
105 serializedContext, err := json.Marshal(context)
106 if err != nil {
107 return nil, err
108 }
109 return kms{key: keyID, context: serializedContext, hasContext: true}, nil
110}
111
112// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
113// The key must be 32 bytes long.
114func NewSSEC(key []byte) (ServerSide, error) {
115 if len(key) != 32 {
116		return nil, errors.New("encrypt: SSE-C key must be 256 bits long")
117 }
118 sse := ssec{}
119 copy(sse[:], key)
120 return sse, nil
121}
122
123// SSE transforms an SSE-C copy encryption into an SSE-C encryption.
124// It is the inverse of SSECopy(...).
125//
126// If the provided sse is not an SSE-C copy encryption, SSE returns
127// sse unmodified.
128func SSE(sse ServerSide) ServerSide {
129 if sse == nil || sse.Type() != SSEC {
130 return sse
131 }
132 if sse, ok := sse.(ssecCopy); ok {
133 return ssec(sse)
134 }
135 return sse
136}
137
138// SSECopy transforms an SSE-C encryption into an SSE-C copy
139// encryption. This is required for SSE-C key rotation or an SSE-C
140// copy where the source and the destination should be encrypted.
141//
142// If the provided sse is not an SSE-C encryption, SSECopy returns
143// sse unmodified.
144func SSECopy(sse ServerSide) ServerSide {
145 if sse == nil || sse.Type() != SSEC {
146 return sse
147 }
148 if sse, ok := sse.(ssec); ok {
149 return ssecCopy(sse)
150 }
151 return sse
152}
153
154type ssec [32]byte
155
156func (s ssec) Type() Type { return SSEC }
157
158func (s ssec) Marshal(h http.Header) {
159 keyMD5 := md5.Sum(s[:])
160 h.Set(SseCustomerAlgorithm, "AES256")
161 h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
162 h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
163}
164
165type ssecCopy [32]byte
166
167func (s ssecCopy) Type() Type { return SSEC }
168
169func (s ssecCopy) Marshal(h http.Header) {
170 keyMD5 := md5.Sum(s[:])
171 h.Set(SseCopyCustomerAlgorithm, "AES256")
172 h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
173 h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
174}
175
176type s3 struct{}
177
178func (s s3) Type() Type { return S3 }
179
180func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
181
182type kms struct {
183 key string
184 context []byte
185 hasContext bool
186}
187
188func (s kms) Type() Type { return KMS }
189
190func (s kms) Marshal(h http.Header) {
191 h.Set(SseGenericHeader, "aws:kms")
192 if s.key != "" {
193 h.Set(SseKmsKeyID, s.key)
194 }
195 if s.hasContext {
196 h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
197 }
198}
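
A minimal, self-contained sketch using only the helpers defined in this file (password and bucket/object salt are illustrative): derive an SSE-C key, marshal its request headers, and wrap the same key as copy-source headers for key rotation:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/minio/minio-go/v7/pkg/encrypt"
    )

    func main() {
        // Derive a 256-bit SSE-C key from a password and a per-object salt.
        sse := encrypt.DefaultPBKDF([]byte("my secret password"), []byte("bucket/object"))

        h := http.Header{}
        sse.Marshal(h)
        fmt.Println(h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm")) // AES256

        // For CopyObject-based key rotation, the same key marshals as copy-source headers.
        hCopy := http.Header{}
        encrypt.SSECopy(sse).Marshal(hCopy)
        fmt.Println(hCopy.Get("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm")) // AES256
    }
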
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 0000000..c52f78c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,491 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package lifecycle contains all the lifecycle-related data types and marshallers.
19package lifecycle
20
21import (
22 "encoding/json"
23 "encoding/xml"
24 "errors"
25 "time"
26)
27
28var errMissingStorageClass = errors.New("storage-class cannot be empty")
29
30// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
31type AbortIncompleteMultipartUpload struct {
32 XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
33 DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
34}
35
36// IsDaysNull returns true if days field is null
37func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
38 return n.DaysAfterInitiation == ExpirationDays(0)
39}
40
41// MarshalXML encodes the element only if DaysAfterInitiation is set to a non-zero value
42func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
43 if n.IsDaysNull() {
44 return nil
45 }
46 type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
47 return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
48}
49
50// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
51// Upon expiration, the server permanently deletes the noncurrent object versions.
52// Set this lifecycle configuration action on a bucket that has versioning enabled
53// (or suspended) to request that the server delete noncurrent object versions at a
54// specific period in the object's lifetime.
55type NoncurrentVersionExpiration struct {
56 XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
57 NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
58 NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
59}
60
61// MarshalXML encodes n only if it is non-empty, i.e., it has a non-zero NoncurrentDays or NewerNoncurrentVersions.
62func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
63 if n.isNull() {
64 return nil
65 }
66 type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
67 return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
68}
69
70// IsDaysNull returns true if days field is null
71func (n NoncurrentVersionExpiration) IsDaysNull() bool {
72 return n.NoncurrentDays == ExpirationDays(0)
73}
74
75func (n NoncurrentVersionExpiration) isNull() bool {
76 return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
77}
78
79// NoncurrentVersionTransition structure, set this action to request the server to
80// transition noncurrent object versions to a different storage class
81// at a specific period in the object's lifetime.
82type NoncurrentVersionTransition struct {
83 XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
84 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
85 NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
86 NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
87}
88
89// IsDaysNull returns true if days field is null
90func (n NoncurrentVersionTransition) IsDaysNull() bool {
91 return n.NoncurrentDays == ExpirationDays(0)
92}
93
94// IsStorageClassEmpty returns true if storage class field is empty
95func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
96 return n.StorageClass == ""
97}
98
99func (n NoncurrentVersionTransition) isNull() bool {
100 return n.StorageClass == ""
101}
102
103// UnmarshalJSON implements JSON unmarshaling for NoncurrentVersionTransition
104func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
105 type noncurrentVersionTransition NoncurrentVersionTransition
106 var nt noncurrentVersionTransition
107 err := json.Unmarshal(b, &nt)
108 if err != nil {
109 return err
110 }
111
112 if nt.StorageClass == "" {
113 return errMissingStorageClass
114 }
115 *n = NoncurrentVersionTransition(nt)
116 return nil
117}
118
119// MarshalXML is extended to leave out
120// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
121func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
122 if n.isNull() {
123 return nil
124 }
125 type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
126 return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
127}
128
129// Tag structure - a key/value pair representing an object tag used in lifecycle configuration
130type Tag struct {
131 XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
132 Key string `xml:"Key,omitempty" json:"Key,omitempty"`
133 Value string `xml:"Value,omitempty" json:"Value,omitempty"`
134}
135
136// IsEmpty returns whether this tag is empty or not.
137func (tag Tag) IsEmpty() bool {
138 return tag.Key == ""
139}
140
141// Transition structure - transition details of lifecycle configuration
142type Transition struct {
143 XMLName xml.Name `xml:"Transition" json:"-"`
144 Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
145 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
146 Days ExpirationDays `xml:"Days" json:"Days"`
147}
148
149// UnmarshalJSON returns an error if storage-class is empty.
150func (t *Transition) UnmarshalJSON(b []byte) error {
151 type transition Transition
152 var tr transition
153 err := json.Unmarshal(b, &tr)
154 if err != nil {
155 return err
156 }
157
158 if tr.StorageClass == "" {
159 return errMissingStorageClass
160 }
161 *t = Transition(tr)
162 return nil
163}
164
165// MarshalJSON customizes json encoding by omitting empty values
166func (t Transition) MarshalJSON() ([]byte, error) {
167 if t.IsNull() {
168 return nil, nil
169 }
170 type transition struct {
171 Date *ExpirationDate `json:"Date,omitempty"`
172 StorageClass string `json:"StorageClass,omitempty"`
173 Days *ExpirationDays `json:"Days"`
174 }
175
176 newt := transition{
177 StorageClass: t.StorageClass,
178 }
179
180 if !t.IsDateNull() {
181 newt.Date = &t.Date
182 } else {
183 newt.Days = &t.Days
184 }
185 return json.Marshal(newt)
186}
187
188// IsDaysNull returns true if days field is null
189func (t Transition) IsDaysNull() bool {
190 return t.Days == ExpirationDays(0)
191}
192
193// IsDateNull returns true if date field is null
194func (t Transition) IsDateNull() bool {
195 return t.Date.Time.IsZero()
196}
197
198// IsNull returns true if no storage-class is set.
199func (t Transition) IsNull() bool {
200 return t.StorageClass == ""
201}
202
203// MarshalXML encodes the transition only if it is non-null
204func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
205 if t.IsNull() {
206 return nil
207 }
208 type transitionWrapper Transition
209 return en.EncodeElement(transitionWrapper(t), startElement)
210}
211
212// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
213type And struct {
214 XMLName xml.Name `xml:"And" json:"-"`
215 Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
216 Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
217 ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
218 ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
219}
220
221// IsEmpty returns true if all And fields are empty
222func (a And) IsEmpty() bool {
223 return len(a.Tags) == 0 && a.Prefix == "" &&
224 a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
225}
226
227// Filter will be used in selecting rule(s) for lifecycle configuration
228type Filter struct {
229 XMLName xml.Name `xml:"Filter" json:"-"`
230 And And `xml:"And,omitempty" json:"And,omitempty"`
231 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
232 Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
233 ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
234 ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
235}
236
237// IsNull returns true if all Filter fields are empty.
238func (f Filter) IsNull() bool {
239 return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
240 f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
241}
242
243// MarshalJSON customizes json encoding by removing empty values.
244func (f Filter) MarshalJSON() ([]byte, error) {
245 type filter struct {
246 And *And `json:"And,omitempty"`
247 Prefix string `json:"Prefix,omitempty"`
248 Tag *Tag `json:"Tag,omitempty"`
249 ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"`
250 ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"`
251 }
252
253 newf := filter{
254 Prefix: f.Prefix,
255 }
256 if !f.Tag.IsEmpty() {
257 newf.Tag = &f.Tag
258 }
259 if !f.And.IsEmpty() {
260 newf.And = &f.And
261 }
262 newf.ObjectSizeLessThan = f.ObjectSizeLessThan
263 newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
264 return json.Marshal(newf)
265}
266
267// MarshalXML - produces the xml representation of the Filter struct;
268// only one of Prefix, And and Tag should be present in the output.
269func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
270 if err := e.EncodeToken(start); err != nil {
271 return err
272 }
273
274 switch {
275 case !f.And.IsEmpty():
276 if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
277 return err
278 }
279 case !f.Tag.IsEmpty():
280 if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
281 return err
282 }
283 default:
284 if f.ObjectSizeLessThan > 0 {
285 if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
286 return err
287 }
288 break
289 }
290 if f.ObjectSizeGreaterThan > 0 {
291 if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
292 return err
293 }
294 break
295 }
296 // Print empty Prefix field only when everything else is empty
297 if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
298 return err
299 }
300 }
301
302 return e.EncodeToken(xml.EndElement{Name: start.Name})
303}
304
305// ExpirationDays is an integer type used to unmarshal Days in Expiration
306type ExpirationDays int
307
308// MarshalXML encodes the number of days to expire if it is non-zero and
309// encodes nothing otherwise
310func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
311 if eDays == 0 {
312 return nil
313 }
314 return e.EncodeElement(int(eDays), startElement)
315}
316
317// ExpirationDate is an embedded type containing time.Time to unmarshal
318// Date in Expiration
319type ExpirationDate struct {
320 time.Time
321}
322
323// MarshalXML encodes the expiration date if it is non-zero and
324// encodes nothing otherwise
325func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
326 if eDate.Time.IsZero() {
327 return nil
328 }
329 return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
330}
331
332// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
333type ExpireDeleteMarker ExpirationBoolean
334
335// IsEnabled returns true if the auto delete-marker expiration is enabled
336func (e ExpireDeleteMarker) IsEnabled() bool {
337 return bool(e)
338}
339
340// ExpirationBoolean represents an XML version of 'bool' type
341type ExpirationBoolean bool
342
343// MarshalXML encodes delete marker boolean into an XML form.
344func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
345 if !b {
346 return nil
347 }
348 type booleanWrapper ExpirationBoolean
349 return e.EncodeElement(booleanWrapper(b), startElement)
350}
351
352// IsEnabled returns true if the expiration boolean is enabled
353func (b ExpirationBoolean) IsEnabled() bool {
354 return bool(b)
355}
356
357// Expiration structure - expiration details of lifecycle configuration
358type Expiration struct {
359 XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
360 Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
361 Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
362 DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
363 DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
364}
365
366// MarshalJSON customizes json encoding by removing empty day/date specification.
367func (e Expiration) MarshalJSON() ([]byte, error) {
368 type expiration struct {
369 Date *ExpirationDate `json:"Date,omitempty"`
370 Days *ExpirationDays `json:"Days,omitempty"`
371 DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
372 DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
373 }
374
375 newexp := expiration{
376 DeleteMarker: e.DeleteMarker,
377 DeleteAll: e.DeleteAll,
378 }
379 if !e.IsDaysNull() {
380 newexp.Days = &e.Days
381 }
382 if !e.IsDateNull() {
383 newexp.Date = &e.Date
384 }
385 return json.Marshal(newexp)
386}
387
388// IsDaysNull returns true if days field is null
389func (e Expiration) IsDaysNull() bool {
390 return e.Days == ExpirationDays(0)
391}
392
393// IsDateNull returns true if date field is null
394func (e Expiration) IsDateNull() bool {
395 return e.Date.Time.IsZero()
396}
397
398// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
399func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
400 return e.DeleteMarker.IsEnabled()
401}
402
403// IsNull returns true if date and days fields are null and delete-marker expiration is not enabled
404func (e Expiration) IsNull() bool {
405 return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
406}
407
408// MarshalXML encodes the expiration only if it is non-null
409func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
410 if e.IsNull() {
411 return nil
412 }
413 type expirationWrapper Expiration
414 return en.EncodeElement(expirationWrapper(e), startElement)
415}
416
417// MarshalJSON customizes json encoding by omitting empty values
418func (r Rule) MarshalJSON() ([]byte, error) {
419 type rule struct {
420 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
421 Expiration *Expiration `json:"Expiration,omitempty"`
422 ID string `json:"ID"`
423 RuleFilter *Filter `json:"Filter,omitempty"`
424 NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
425 NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
426 Prefix string `json:"Prefix,omitempty"`
427 Status string `json:"Status"`
428 Transition *Transition `json:"Transition,omitempty"`
429 }
430 newr := rule{
431 Prefix: r.Prefix,
432 Status: r.Status,
433 ID: r.ID,
434 }
435
436 if !r.RuleFilter.IsNull() {
437 newr.RuleFilter = &r.RuleFilter
438 }
439 if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
440 newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
441 }
442 if !r.Expiration.IsNull() {
443 newr.Expiration = &r.Expiration
444 }
445 if !r.Transition.IsNull() {
446 newr.Transition = &r.Transition
447 }
448 if !r.NoncurrentVersionExpiration.isNull() {
449 newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
450 }
451 if !r.NoncurrentVersionTransition.isNull() {
452 newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
453 }
454
455 return json.Marshal(newr)
456}
457
458// Rule represents a single rule in lifecycle configuration
459type Rule struct {
460 XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
461 AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
462 Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
463 ID string `xml:"ID" json:"ID"`
464 RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
465 NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
466 NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
467 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
468 Status string `xml:"Status" json:"Status"`
469 Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"`
470}
471
472// Configuration is a collection of Rule objects.
473type Configuration struct {
474 XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
475 Rules []Rule `xml:"Rule"`
476}
477
478// Empty checks if the lifecycle configuration is empty
479func (c *Configuration) Empty() bool {
480 if c == nil {
481 return true
482 }
483 return len(c.Rules) == 0
484}
485
486// NewConfiguration initializes a fresh lifecycle configuration
487// for manipulation, such as setting and removing lifecycle rules
488// and filters.
489func NewConfiguration() *Configuration {
490 return &Configuration{}
491}
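
A short sketch of building and serializing a lifecycle configuration with the types above (rule ID, prefix, and day count are illustrative):

    package main

    import (
        "encoding/xml"
        "fmt"

        "github.com/minio/minio-go/v7/pkg/lifecycle"
    )

    func main() {
        cfg := lifecycle.NewConfiguration()
        cfg.Rules = append(cfg.Rules, lifecycle.Rule{
            ID:         "expire-logs",
            Status:     "Enabled",
            RuleFilter: lifecycle.Filter{Prefix: "logs/"},
            Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
        })

        out, err := xml.Marshal(cfg)
        if err != nil {
            panic(err)
        }
        // The custom marshalers above omit the empty Transition, NoncurrentVersion*
        // and AbortIncompleteMultipartUpload elements from the output.
        fmt.Println(string(out))
    }
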
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 0000000..126661a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
20// identity represents the user id; this is a compliance field.
21type identity struct {
22 PrincipalID string `json:"principalId"`
23}
24
25// event bucket metadata.
26type bucketMeta struct {
27 Name string `json:"name"`
28 OwnerIdentity identity `json:"ownerIdentity"`
29 ARN string `json:"arn"`
30}
31
32// event object metadata.
33type objectMeta struct {
34 Key string `json:"key"`
35 Size int64 `json:"size,omitempty"`
36 ETag string `json:"eTag,omitempty"`
37 ContentType string `json:"contentType,omitempty"`
38 UserMetadata map[string]string `json:"userMetadata,omitempty"`
39 VersionID string `json:"versionId,omitempty"`
40 Sequencer string `json:"sequencer"`
41}
42
43// event server specific metadata.
44type eventMeta struct {
45 SchemaVersion string `json:"s3SchemaVersion"`
46 ConfigurationID string `json:"configurationId"`
47 Bucket bucketMeta `json:"bucket"`
48 Object objectMeta `json:"object"`
49}
50
51// sourceInfo represents information on the client that
52// triggered the event notification.
53type sourceInfo struct {
54 Host string `json:"host"`
55 Port string `json:"port"`
56 UserAgent string `json:"userAgent"`
57}
58
59// Event represents an Amazon S3 bucket notification event.
60type Event struct {
61 EventVersion string `json:"eventVersion"`
62 EventSource string `json:"eventSource"`
63 AwsRegion string `json:"awsRegion"`
64 EventTime string `json:"eventTime"`
65 EventName string `json:"eventName"`
66 UserIdentity identity `json:"userIdentity"`
67 RequestParameters map[string]string `json:"requestParameters"`
68 ResponseElements map[string]string `json:"responseElements"`
69 S3 eventMeta `json:"s3"`
70 Source sourceInfo `json:"source"`
71}
72
73// Info - represents the collection of notification events; additionally
74// reports errors, if any, while listening on bucket notifications.
75type Info struct {
76 Records []Event
77 Err error
78}
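
A hedged sketch of consuming these events via the client's ListenBucketNotification API from the main minio-go package (endpoint, credentials, bucket, and event list are illustrative):

    package main

    import (
        "context"
        "log"

        "github.com/minio/minio-go/v7"
        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        client, err := minio.New("play.min.io", &minio.Options{
            Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""), // placeholders
            Secure: true,
        })
        if err != nil {
            log.Fatalln(err)
        }

        ctx := context.Background()
        // Each received Info carries either Records or a non-nil Err.
        for info := range client.ListenBucketNotification(ctx, "mybucket", "", "", []string{
            "s3:ObjectCreated:*",
        }) {
            if info.Err != nil {
                log.Fatalln(info.Err)
            }
            for _, event := range info.Records {
                log.Println(event.EventName, event.S3.Object.Key)
            }
        }
    }
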
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 0000000..a44799d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,440 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "strings"
25
26 "github.com/minio/minio-go/v7/pkg/set"
27)
28
29// EventType is an S3 notification event associated with the bucket notification configuration
30type EventType string
31
32// The role of all event types is described in:
33//
34// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
35const (
36 ObjectCreatedAll EventType = "s3:ObjectCreated:*"
37 ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
38 ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
39 ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
40 ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging"
41 ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
42 ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold"
43 ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention"
44 ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging"
45 ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
46 ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
47 ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention"
48 ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold"
49 ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
50 ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
51 ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
52 ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
53 ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
54 ObjectTransitionAll EventType = "s3:ObjectTransition:*"
55 ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed"
56 ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete"
57 ObjectTransitionPost EventType = "s3:ObjectRestore:Post"
58 ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed"
59 ObjectReplicationAll EventType = "s3:Replication:*"
60 ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication"
61 ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication"
62 ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold"
63 ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked"
64 ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
65 ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions"
66 ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix"
67 ObjectScannerAll EventType = "s3:Scanner:*"
68 BucketCreatedAll EventType = "s3:BucketCreated:*"
69 BucketRemovedAll EventType = "s3:BucketRemoved:*"
70)
71
72// FilterRule - child of S3Key, a tag in the notification xml which
73// carries suffix/prefix filters
74type FilterRule struct {
75 Name string `xml:"Name"`
76 Value string `xml:"Value"`
77}
78
79// S3Key - child of Filter, a tag in the notification xml which
80// carries suffix/prefix filters
81type S3Key struct {
82 FilterRules []FilterRule `xml:"FilterRule,omitempty"`
83}
84
85// Filter - a tag in the notification xml structure which carries
86// suffix/prefix filters
87type Filter struct {
88 S3Key S3Key `xml:"S3Key,omitempty"`
89}
90
91// Arn - holds ARN information that will be sent to the web service,
92// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
93type Arn struct {
94 Partition string
95 Service string
96 Region string
97 AccountID string
98 Resource string
99}
100
101// NewArn creates new ARN based on the given partition, service, region, account id and resource
102func NewArn(partition, service, region, accountID, resource string) Arn {
103 return Arn{
104 Partition: partition,
105 Service: service,
106 Region: region,
107 AccountID: accountID,
108 Resource: resource,
109 }
110}
111
112var (
113 // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
114 ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
115 // ErrInvalidArnFormat is returned when ARN string format is not valid
116 ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
117)
118
119// NewArnFromString parses the string representation of an ARN into an Arn object.
120// Returns an error if the string format is incorrect.
121func NewArnFromString(arn string) (Arn, error) {
122 parts := strings.Split(arn, ":")
123 if len(parts) != 6 {
124 return Arn{}, ErrInvalidArnFormat
125 }
126 if parts[0] != "arn" {
127 return Arn{}, ErrInvalidArnPrefix
128 }
129
130 return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil
131}
132
133// String returns the string format of the ARN
134func (arn Arn) String() string {
135 return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
136}
137
138// Config - represents one single notification configuration
139// such as topic, queue or lambda configuration.
140type Config struct {
141 ID string `xml:"Id,omitempty"`
142 Arn Arn `xml:"-"`
143 Events []EventType `xml:"Event"`
144 Filter *Filter `xml:"Filter,omitempty"`
145}
146
147// NewConfig creates one notification config and sets the given ARN
148func NewConfig(arn Arn) Config {
149 return Config{Arn: arn, Filter: &Filter{}}
150}
151
152// AddEvents adds one or more events to the current notification config
153func (t *Config) AddEvents(events ...EventType) {
154 t.Events = append(t.Events, events...)
155}
156
157// AddFilterSuffix sets the suffix filter on the current notification config
158func (t *Config) AddFilterSuffix(suffix string) {
159 if t.Filter == nil {
160 t.Filter = &Filter{}
161 }
162 newFilterRule := FilterRule{Name: "suffix", Value: suffix}
163 // Replace any suffix rule if existing and add to the list otherwise
164 for index := range t.Filter.S3Key.FilterRules {
165 if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
166 t.Filter.S3Key.FilterRules[index] = newFilterRule
167 return
168 }
169 }
170 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
171}
172
173// AddFilterPrefix sets the prefix filter on the current notification config
174func (t *Config) AddFilterPrefix(prefix string) {
175 if t.Filter == nil {
176 t.Filter = &Filter{}
177 }
178 newFilterRule := FilterRule{Name: "prefix", Value: prefix}
179 // Replace any prefix rule if existing and add to the list otherwise
180 for index := range t.Filter.S3Key.FilterRules {
181 if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
182 t.Filter.S3Key.FilterRules[index] = newFilterRule
183 return
184 }
185 }
186 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
187}
188
189// EqualEventTypeList tells whether a and b contain the same events
190func EqualEventTypeList(a, b []EventType) bool {
191 if len(a) != len(b) {
192 return false
193 }
194 setA := set.NewStringSet()
195 for _, i := range a {
196 setA.Add(string(i))
197 }
198
199 setB := set.NewStringSet()
200 for _, i := range b {
201 setB.Add(string(i))
202 }
203
204 return setA.Difference(setB).IsEmpty()
205}
206
207// EqualFilterRuleList tells whether a and b contain the same filters
208func EqualFilterRuleList(a, b []FilterRule) bool {
209 if len(a) != len(b) {
210 return false
211 }
212
213 setA := set.NewStringSet()
214 for _, i := range a {
215 setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
216 }
217
218 setB := set.NewStringSet()
219 for _, i := range b {
220 setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
221 }
222
223 return setA.Difference(setB).IsEmpty()
224}
225
226// Equal returns whether this `Config` is equal to another defined by the passed parameters
227func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
228 if t == nil {
229 return false
230 }
231
232 // Compare events
233 passEvents := EqualEventTypeList(t.Events, events)
234
235 // Compare filters
236 var newFilterRules []FilterRule
237 if prefix != "" {
238 newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix})
239 }
240 if suffix != "" {
241 newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix})
242 }
243
244 var currentFilterRules []FilterRule
245 if t.Filter != nil {
246 currentFilterRules = t.Filter.S3Key.FilterRules
247 }
248
249 passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules)
250 return passEvents && passFilters
251}
252
253// TopicConfig carries one single topic notification configuration
254type TopicConfig struct {
255 Config
256 Topic string `xml:"Topic"`
257}
258
259// QueueConfig carries one single queue notification configuration
260type QueueConfig struct {
261 Config
262 Queue string `xml:"Queue"`
263}
264
265// LambdaConfig carries one single cloudfunction notification configuration
266type LambdaConfig struct {
267 Config
268 Lambda string `xml:"CloudFunction"`
269}
270
271// Configuration - the struct that represents the whole XML to be sent to the web service
272type Configuration struct {
273 XMLName xml.Name `xml:"NotificationConfiguration"`
274 LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
275 TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
276 QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
277}
278
279// AddTopic adds a given topic config to the general bucket notification config
280func (b *Configuration) AddTopic(topicConfig Config) bool {
281 newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
282 for _, n := range b.TopicConfigs {
283 // If new config matches existing one
284 if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
285
286 existingConfig := set.NewStringSet()
287 for _, v := range n.Events {
288 existingConfig.Add(string(v))
289 }
290
291 newConfig := set.NewStringSet()
292 for _, v := range topicConfig.Events {
293 newConfig.Add(string(v))
294 }
295
296 if !newConfig.Intersection(existingConfig).IsEmpty() {
297 return false
298 }
299 }
300 }
301 b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
302 return true
303}
304
305// AddQueue adds a given queue config to the general bucket notification config
306func (b *Configuration) AddQueue(queueConfig Config) bool {
307 newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
308 for _, n := range b.QueueConfigs {
309 if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
310
311 existingConfig := set.NewStringSet()
312 for _, v := range n.Events {
313 existingConfig.Add(string(v))
314 }
315
316 newConfig := set.NewStringSet()
317 for _, v := range queueConfig.Events {
318 newConfig.Add(string(v))
319 }
320
321 if !newConfig.Intersection(existingConfig).IsEmpty() {
322 return false
323 }
324 }
325 }
326 b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
327 return true
328}
329
330// AddLambda adds a given lambda config to the general bucket notification config
331func (b *Configuration) AddLambda(lambdaConfig Config) bool {
332 newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
333 for _, n := range b.LambdaConfigs {
334 if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
335
336 existingConfig := set.NewStringSet()
337 for _, v := range n.Events {
338 existingConfig.Add(string(v))
339 }
340
341 newConfig := set.NewStringSet()
342 for _, v := range lambdaConfig.Events {
343 newConfig.Add(string(v))
344 }
345
346 if !newConfig.Intersection(existingConfig).IsEmpty() {
347 return false
348 }
349 }
350 }
351 b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
352 return true
353}
354
355// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
356func (b *Configuration) RemoveTopicByArn(arn Arn) {
357 var topics []TopicConfig
358 for _, topic := range b.TopicConfigs {
359 if topic.Topic != arn.String() {
360 topics = append(topics, topic)
361 }
362 }
363 b.TopicConfigs = topics
364}
365
366// ErrNoConfigMatch is returned when a notification configuration (sqs, sns, lambda) is not found when trying to delete it
367var ErrNoConfigMatch = errors.New("no notification configuration matched")
368
369// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix
370func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
371 removeIndex := -1
372 for i, v := range b.TopicConfigs {
373 // if it matches events and filters, mark the index for deletion
374 if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
375 removeIndex = i
376 break // since we have at most one matching config
377 }
378 }
379 if removeIndex >= 0 {
380 b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
381 return nil
382 }
383 return ErrNoConfigMatch
384}
385
386// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
387func (b *Configuration) RemoveQueueByArn(arn Arn) {
388 var queues []QueueConfig
389 for _, queue := range b.QueueConfigs {
390 if queue.Queue != arn.String() {
391 queues = append(queues, queue)
392 }
393 }
394 b.QueueConfigs = queues
395}
396
397// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix
398func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
399 removeIndex := -1
400 for i, v := range b.QueueConfigs {
401 // if it matches events and filters, mark the index for deletion
402 if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
403 removeIndex = i
404 break // since we have at most one matching config
405 }
406 }
407 if removeIndex >= 0 {
408 b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
409 return nil
410 }
411 return ErrNoConfigMatch
412}
413
414// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
415func (b *Configuration) RemoveLambdaByArn(arn Arn) {
416 var lambdas []LambdaConfig
417 for _, lambda := range b.LambdaConfigs {
418 if lambda.Lambda != arn.String() {
419 lambdas = append(lambdas, lambda)
420 }
421 }
422 b.LambdaConfigs = lambdas
423}
424
425// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix
426func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
427 removeIndex := -1
428 for i, v := range b.LambdaConfigs {
429 // if it matches events and filters, mark the index for deletion
430 if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
431 removeIndex = i
432 break // since we have at most one matching config
433 }
434 }
435 if removeIndex >= 0 {
436 b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
437 return nil
438 }
439 return ErrNoConfigMatch
440}
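
A minimal sketch tying these pieces together: build an ARN, attach events and filters, add the config to a bucket-level Configuration, and serialize it (the queue ARN values are illustrative):

    package main

    import (
        "encoding/xml"
        "fmt"

        "github.com/minio/minio-go/v7/pkg/notification"
    )

    func main() {
        arn := notification.NewArn("minio", "sqs", "us-east-1", "1", "webhook")

        cfg := notification.NewConfig(arn)
        cfg.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
        cfg.AddFilterPrefix("photos/")
        cfg.AddFilterSuffix(".jpg")

        var bucketCfg notification.Configuration
        // AddQueue reports false if an overlapping config for this ARN already exists.
        if ok := bucketCfg.AddQueue(cfg); !ok {
            fmt.Println("config overlaps an existing one")
            return
        }

        out, err := xml.Marshal(bucketCfg)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }
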
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 0000000..0abbf6e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,971 @@
1/*
2 * MinIO Client (C) 2020 MinIO, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package replication
18
19import (
20 "bytes"
21 "encoding/xml"
22 "fmt"
23 "math"
24 "strconv"
25 "strings"
26 "time"
27 "unicode/utf8"
28
29 "github.com/rs/xid"
30)
31
32var errInvalidFilter = fmt.Errorf("invalid filter")
33
34// OptionType specifies operation to be performed on config
35type OptionType string
36
37const (
38 // AddOption specifies addition of rule to config
39 AddOption OptionType = "Add"
40 // SetOption specifies modification of existing rule to config
41 SetOption OptionType = "Set"
42
43	// RemoveOption specifies removal of an existing rule from config
44 RemoveOption OptionType = "Remove"
45 // ImportOption is for getting current config
46 ImportOption OptionType = "Import"
47)
48
49// Options represents options to set a replication configuration rule
50type Options struct {
51 Op OptionType
52 RoleArn string
53 ID string
54 Prefix string
55 RuleStatus string
56 Priority string
57 TagString string
58 StorageClass string
59 DestBucket string
60 IsTagSet bool
61 IsSCSet bool
62 ReplicateDeletes string // replicate versioned deletes
63 ReplicateDeleteMarkers string // replicate soft deletes
64 ReplicaSync string // replicate replica metadata modifications
65 ExistingObjectReplicate string
66}
67
68// Tags returns a slice of tags for a rule
69func (opts Options) Tags() ([]Tag, error) {
70 var tagList []Tag
71 tagTokens := strings.Split(opts.TagString, "&")
72 for _, tok := range tagTokens {
73 if tok == "" {
74 break
75 }
76 kv := strings.SplitN(tok, "=", 2)
77 if len(kv) != 2 {
78			return []Tag{}, fmt.Errorf("tags should be entered as '&' separated k=v pairs")
79 }
80 tagList = append(tagList, Tag{
81 Key: kv[0],
82 Value: kv[1],
83 })
84 }
85 return tagList, nil
86}
87
88// Config - replication configuration specified in
89// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
90type Config struct {
91 XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
92 Rules []Rule `xml:"Rule" json:"Rules"`
93 Role string `xml:"Role" json:"Role"`
94}
95
96// Empty returns true if config is not set
97func (c *Config) Empty() bool {
98 return len(c.Rules) == 0
99}
100
101// AddRule adds a new rule to the existing replication config. If a rule with the
102// same ID already exists, an error is returned.
103func (c *Config) AddRule(opts Options) error {
104 priority, err := strconv.Atoi(opts.Priority)
105 if err != nil {
106 return err
107 }
108 var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
109 if opts.RoleArn != "" {
110 tokens := strings.Split(opts.RoleArn, ":")
111 if len(tokens) != 6 {
112 return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
113 }
114 switch {
115 case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
116 c.Role = opts.RoleArn
117 compatSw = true
118 case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
119 c.Role = opts.RoleArn
120 default:
121 return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
122 }
123 }
124
125 var status Status
126 // toggle rule status for edit option
127 switch opts.RuleStatus {
128 case "enable":
129 status = Enabled
130 case "disable":
131 status = Disabled
132 default:
133 return fmt.Errorf("rule state should be either [enable|disable]")
134 }
135
136 tags, err := opts.Tags()
137 if err != nil {
138 return err
139 }
140 andVal := And{
141 Tags: tags,
142 }
143 filter := Filter{Prefix: opts.Prefix}
144 // only a single tag is set.
145 if opts.Prefix == "" && len(tags) == 1 {
146 filter.Tag = tags[0]
147 }
148 // both prefix and tag are present
149 if len(andVal.Tags) > 1 || opts.Prefix != "" {
150 filter.And = andVal
151 filter.And.Prefix = opts.Prefix
152 filter.Prefix = ""
153 filter.Tag = Tag{}
154 }
155 if opts.ID == "" {
156 opts.ID = xid.New().String()
157 }
158
159 destBucket := opts.DestBucket
160 // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
161 if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
162 if len(btokens) == 1 && compatSw {
163 destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
164 } else {
165 return fmt.Errorf("destination bucket needs to be in Arn format")
166 }
167 }
168 dmStatus := Disabled
169 if opts.ReplicateDeleteMarkers != "" {
170 switch opts.ReplicateDeleteMarkers {
171 case "enable":
172 dmStatus = Enabled
173 case "disable":
174 dmStatus = Disabled
175 default:
176 return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable")
177 }
178 }
179
180 vDeleteStatus := Disabled
181 if opts.ReplicateDeletes != "" {
182 switch opts.ReplicateDeletes {
183 case "enable":
184 vDeleteStatus = Enabled
185 case "disable":
186 vDeleteStatus = Disabled
187 default:
188 return fmt.Errorf("ReplicateDeletes should be either enable|disable")
189 }
190 }
191 var replicaSync Status
192 // replica sync is by default Enabled, unless specified.
193 switch opts.ReplicaSync {
194 case "enable", "":
195 replicaSync = Enabled
196 case "disable":
197 replicaSync = Disabled
198 default:
199 return fmt.Errorf("replica metadata sync should be either [enable|disable]")
200 }
201
202 var existingStatus Status
203 if opts.ExistingObjectReplicate != "" {
204 switch opts.ExistingObjectReplicate {
205 case "enable":
206 existingStatus = Enabled
207 case "disable", "":
208 existingStatus = Disabled
209 default:
210 return fmt.Errorf("existingObjectReplicate should be either enable|disable")
211 }
212 }
213 newRule := Rule{
214 ID: opts.ID,
215 Priority: priority,
216 Status: status,
217 Filter: filter,
218 Destination: Destination{
219 Bucket: destBucket,
220 StorageClass: opts.StorageClass,
221 },
222 DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus},
223 DeleteReplication: DeleteReplication{Status: vDeleteStatus},
224 // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow
225 // automatic failover as the expectation in this case is that replica and source should be identical.
226 // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
227 SourceSelectionCriteria: SourceSelectionCriteria{
228 ReplicaModifications: ReplicaModifications{
229 Status: replicaSync,
230 },
231 },
232 // By default disable existing object replication unless selected
233 ExistingObjectReplication: ExistingObjectReplication{
234 Status: existingStatus,
235 },
236 }
237
238 // validate rule after overlaying priority for pre-existing rule being disabled.
239 if err := newRule.Validate(); err != nil {
240 return err
241 }
242 // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
243 if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
244 for i := range c.Rules {
245 c.Rules[i].Destination.Bucket = c.Role
246 }
247 c.Role = ""
248 }
249
250 for _, rule := range c.Rules {
251 if rule.Priority == newRule.Priority {
252 return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
253 }
254 if rule.ID == newRule.ID {
255 return fmt.Errorf("a rule exists with this ID")
256 }
257 }
258
259 c.Rules = append(c.Rules, newRule)
260 return nil
261}
262
263// EditRule modifies an existing rule in replication config
264func (c *Config) EditRule(opts Options) error {
265 if opts.ID == "" {
266 return fmt.Errorf("rule ID missing")
267 }
268 // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
269 if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
270 for i := range c.Rules {
271 c.Rules[i].Destination.Bucket = c.Role
272 }
273 c.Role = ""
274 }
275
276 rIdx := -1
277 var newRule Rule
278 for i, rule := range c.Rules {
279 if rule.ID == opts.ID {
280 rIdx = i
281 newRule = rule
282 break
283 }
284 }
285 if rIdx < 0 {
286 return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
287 }
288 prefixChg := opts.Prefix != newRule.Prefix()
289 if opts.IsTagSet || prefixChg {
290 prefix := newRule.Prefix()
291 if prefix != opts.Prefix {
292 prefix = opts.Prefix
293 }
294 tags := []Tag{newRule.Filter.Tag}
295 if len(newRule.Filter.And.Tags) != 0 {
296 tags = newRule.Filter.And.Tags
297 }
298 var err error
299 if opts.IsTagSet {
300 tags, err = opts.Tags()
301 if err != nil {
302 return err
303 }
304 }
305 andVal := And{
306 Tags: tags,
307 }
308
309 filter := Filter{Prefix: prefix}
310 // only a single tag is set.
311 if prefix == "" && len(tags) == 1 {
312 filter.Tag = tags[0]
313 }
314 // both prefix and tag are present
315 if len(andVal.Tags) > 1 || prefix != "" {
316 filter.And = andVal
317 filter.And.Prefix = prefix
318 filter.Prefix = ""
319 filter.Tag = Tag{}
320 }
321 newRule.Filter = filter
322 }
323
324 // toggle rule status for edit option
325 if opts.RuleStatus != "" {
326 switch opts.RuleStatus {
327 case "enable":
328 newRule.Status = Enabled
329 case "disable":
330 newRule.Status = Disabled
331 default:
332 return fmt.Errorf("rule state should be either [enable|disable]")
333 }
334 }
335 // set DeleteMarkerReplication rule status for edit option
336 if opts.ReplicateDeleteMarkers != "" {
337 switch opts.ReplicateDeleteMarkers {
338 case "enable":
339 newRule.DeleteMarkerReplication.Status = Enabled
340 case "disable":
341 newRule.DeleteMarkerReplication.Status = Disabled
342 default:
343 return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]")
344 }
345 }
346
347 // set DeleteReplication rule status for edit option. This is a MinIO specific
348 // option to replicate versioned deletes
349 if opts.ReplicateDeletes != "" {
350 switch opts.ReplicateDeletes {
351 case "enable":
352 newRule.DeleteReplication.Status = Enabled
353 case "disable":
354 newRule.DeleteReplication.Status = Disabled
355 default:
356 return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
357 }
358 }
359
360 if opts.ReplicaSync != "" {
361 switch opts.ReplicaSync {
362 case "enable", "":
363 newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
364 case "disable":
365 newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
366 default:
367 return fmt.Errorf("replica metadata sync should be either [enable|disable]")
368 }
369 }
370
371 if opts.ExistingObjectReplicate != "" {
372 switch opts.ExistingObjectReplicate {
373 case "enable":
374 newRule.ExistingObjectReplication.Status = Enabled
375 case "disable":
376 newRule.ExistingObjectReplication.Status = Disabled
377 default:
378 return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
379 }
380 }
381 if opts.IsSCSet {
382 newRule.Destination.StorageClass = opts.StorageClass
383 }
384 if opts.Priority != "" {
385 priority, err := strconv.Atoi(opts.Priority)
386 if err != nil {
387 return err
388 }
389 newRule.Priority = priority
390 }
391 if opts.DestBucket != "" {
392 destBucket := opts.DestBucket
393 // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
394 if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
395			return fmt.Errorf("destination bucket needs to be in ARN format")
396 }
397 newRule.Destination.Bucket = destBucket
398 }
399 // validate rule
400 if err := newRule.Validate(); err != nil {
401 return err
402 }
403 // ensure priority and destination bucket restrictions are not violated
404 for idx, rule := range c.Rules {
405 if rule.Priority == newRule.Priority && rIdx != idx {
406 return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
407 }
408 if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
409 return fmt.Errorf("invalid destination bucket for this rule")
410 }
411 }
412
413 c.Rules[rIdx] = newRule
414 return nil
415}
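
As a quick illustration of the edit flow above, here is a minimal sketch (rule ID, bucket ARN and priorities invented; assumes the vendored pkg/replication import path) that disables a rule and bumps its priority:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	cfg := replication.Config{Rules: []replication.Rule{{
		ID:       "rule-1", // hypothetical rule ID
		Status:   replication.Enabled,
		Priority: 1,
		Destination: replication.Destination{
			Bucket: "arn:aws:s3:::backup-bucket", // hypothetical ARN
		},
	}}}

	// Toggle the rule off and raise its priority; EditRule validates
	// the result and checks priority uniqueness before storing it.
	err := cfg.EditRule(replication.Options{
		ID:         "rule-1",
		RuleStatus: "disable",
		Priority:   "2",
	})
	fmt.Println(err, cfg.Rules[0].Status) // <nil> Disabled
}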
416
417// RemoveRule removes a rule from replication config.
418func (c *Config) RemoveRule(opts Options) error {
419 var newRules []Rule
420 ruleFound := false
421 for _, rule := range c.Rules {
422 if rule.ID != opts.ID {
423 newRules = append(newRules, rule)
424 continue
425 }
426 ruleFound = true
427 }
428 if !ruleFound {
429		return fmt.Errorf("rule with ID %s not found", opts.ID)
430 }
431 if len(newRules) == 0 {
432 return fmt.Errorf("replication configuration should have at least one rule")
433 }
434 c.Rules = newRules
435 return nil
436}
437
438// Rule - a rule for replication configuration.
439type Rule struct {
440 XMLName xml.Name `xml:"Rule" json:"-"`
441 ID string `xml:"ID,omitempty"`
442 Status Status `xml:"Status"`
443 Priority int `xml:"Priority"`
444 DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
445 DeleteReplication DeleteReplication `xml:"DeleteReplication"`
446 Destination Destination `xml:"Destination"`
447 Filter Filter `xml:"Filter" json:"Filter"`
448 SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
449 ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
450}
451
452// Validate validates the rule for correctness
453func (r Rule) Validate() error {
454 if err := r.validateID(); err != nil {
455 return err
456 }
457 if err := r.validateStatus(); err != nil {
458 return err
459 }
460 if err := r.validateFilter(); err != nil {
461 return err
462 }
463
464 if r.Priority < 0 && r.Status == Enabled {
465 return fmt.Errorf("priority must be set for the rule")
466 }
467
471 return r.ExistingObjectReplication.Validate()
472}
473
474// validateID - checks if ID is valid or not.
475func (r Rule) validateID() error {
476 // cannot be longer than 255 characters
477 if len(r.ID) > 255 {
478		return fmt.Errorf("ID must not exceed 255 characters")
479 }
480 return nil
481}
482
483// validateStatus - checks if status is valid or not.
484func (r Rule) validateStatus() error {
485 // Status can't be empty
486 if len(r.Status) == 0 {
487 return fmt.Errorf("status cannot be empty")
488 }
489
490 // Status must be one of Enabled or Disabled
491 if r.Status != Enabled && r.Status != Disabled {
492 return fmt.Errorf("status must be set to either Enabled or Disabled")
493 }
494 return nil
495}
496
497func (r Rule) validateFilter() error {
498 return r.Filter.Validate()
499}
500
501// Prefix - a rule can have its prefix either under <filter></filter> or under
502// <filter><and></and></filter>. This method returns the prefix from the
503// location where it is available
504func (r Rule) Prefix() string {
505 if r.Filter.Prefix != "" {
506 return r.Filter.Prefix
507 }
508 return r.Filter.And.Prefix
509}
510
511// Tags - a rule can have its tags either under <filter></filter> or under
512// <filter><and></and></filter>. This method returns all the tags from the
513// rule in the format tag1=value1&tag2=value2
514func (r Rule) Tags() string {
515 ts := []Tag{r.Filter.Tag}
516 if len(r.Filter.And.Tags) != 0 {
517 ts = r.Filter.And.Tags
518 }
519
520 var buf bytes.Buffer
521 for _, t := range ts {
522 if buf.Len() > 0 {
523 buf.WriteString("&")
524 }
525 buf.WriteString(t.String())
526 }
527 return buf.String()
528}
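
To make the two lookup locations concrete, a small hypothetical sketch (prefix and tag values invented):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// Prefix and tags live inside <And> when more than one is present,
	// so Prefix() and Tags() both read from the And element here.
	r := replication.Rule{Filter: replication.Filter{And: replication.And{
		Prefix: "logs/",
		Tags: []replication.Tag{
			{Key: "env", Value: "prod"},
			{Key: "team", Value: "core"},
		},
	}}}
	fmt.Println(r.Prefix()) // logs/
	fmt.Println(r.Tags())   // env=prod&team=core
}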
529
530// Filter - a filter for a replication configuration Rule.
531type Filter struct {
532 XMLName xml.Name `xml:"Filter" json:"-"`
533 Prefix string `json:"Prefix,omitempty"`
534 And And `xml:"And,omitempty" json:"And,omitempty"`
535 Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
536}
537
538// Validate - validates the filter element
539func (f Filter) Validate() error {
540 // A Filter must have exactly one of Prefix, Tag, or And specified.
541 if !f.And.isEmpty() {
542 if f.Prefix != "" {
543 return errInvalidFilter
544 }
545 if !f.Tag.IsEmpty() {
546 return errInvalidFilter
547 }
548 }
549 if f.Prefix != "" {
550 if !f.Tag.IsEmpty() {
551 return errInvalidFilter
552 }
553 }
554 if !f.Tag.IsEmpty() {
555 if err := f.Tag.Validate(); err != nil {
556 return err
557 }
558 }
559 return nil
560}
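
A brief sketch of the exactly-one constraint (values hypothetical): a bare prefix is fine, but a prefix and a tag side by side must be wrapped in And:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// Valid: only a prefix is set.
	ok := replication.Filter{Prefix: "logs/"}
	fmt.Println(ok.Validate()) // <nil>

	// Invalid: Prefix and Tag set side by side instead of inside <And>.
	bad := replication.Filter{
		Prefix: "logs/",
		Tag:    replication.Tag{Key: "env", Value: "prod"},
	}
	fmt.Println(bad.Validate() != nil) // true
}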
561
562// Tag - a tag for a replication configuration Rule filter.
563type Tag struct {
564 XMLName xml.Name `json:"-"`
565 Key string `xml:"Key,omitempty" json:"Key,omitempty"`
566 Value string `xml:"Value,omitempty" json:"Value,omitempty"`
567}
568
569func (tag Tag) String() string {
570 if tag.IsEmpty() {
571 return ""
572 }
573 return tag.Key + "=" + tag.Value
574}
575
576// IsEmpty returns whether this tag is empty or not.
577func (tag Tag) IsEmpty() bool {
578 return tag.Key == ""
579}
580
581// Validate checks this tag.
582func (tag Tag) Validate() error {
583 if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
584 return fmt.Errorf("invalid Tag Key")
585 }
586
587 if utf8.RuneCountInString(tag.Value) > 256 {
588 return fmt.Errorf("invalid Tag Value")
589 }
590 return nil
591}
592
593// Destination - destination in ReplicationConfiguration.
594type Destination struct {
595 XMLName xml.Name `xml:"Destination" json:"-"`
596 Bucket string `xml:"Bucket" json:"Bucket"`
597 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
598}
599
600// And - a tag to combine a prefix and multiple tags for replication configuration rule.
601type And struct {
602 XMLName xml.Name `xml:"And,omitempty" json:"-"`
603 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
604 Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
605}
606
607// isEmpty returns true if neither Prefix nor Tags are set
608func (a And) isEmpty() bool {
609 return len(a.Tags) == 0 && a.Prefix == ""
610}
611
612// Status represents Enabled/Disabled status
613type Status string
614
615// Supported status types
616const (
617 Enabled Status = "Enabled"
618 Disabled Status = "Disabled"
619)
620
621// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
622type DeleteMarkerReplication struct {
623 Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
624}
625
626// IsEmpty returns true if DeleteMarkerReplication is not set
627func (d DeleteMarkerReplication) IsEmpty() bool {
628 return len(d.Status) == 0
629}
630
631// DeleteReplication - whether versioned deletes are replicated - this
632// is a MinIO specific extension
633type DeleteReplication struct {
634 Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
635}
636
637// IsEmpty returns true if DeleteReplication is not set
638func (d DeleteReplication) IsEmpty() bool {
639 return len(d.Status) == 0
640}
641
642// ReplicaModifications specifies if replica modification sync is enabled
643type ReplicaModifications struct {
644 Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
645}
646
647// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
648type SourceSelectionCriteria struct {
649 ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"`
650}
651
652// IsValid - checks whether SourceSelectionCriteria is valid or not.
653func (s SourceSelectionCriteria) IsValid() bool {
654 return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
655}
656
657// Validate source selection criteria
658func (s SourceSelectionCriteria) Validate() error {
659 if (s == SourceSelectionCriteria{}) {
660 return nil
661 }
662 if !s.IsValid() {
663 return fmt.Errorf("invalid ReplicaModification status")
664 }
665 return nil
666}
667
668// ExistingObjectReplication - whether existing object replication is enabled
669type ExistingObjectReplication struct {
670 Status Status `xml:"Status"` // should be set to "Disabled" by default
671}
672
673// IsEmpty returns true if ExistingObjectReplication is not set
674func (e ExistingObjectReplication) IsEmpty() bool {
675 return len(e.Status) == 0
676}
677
678// Validate checks that the status, if set, is either Enabled or Disabled.
679func (e ExistingObjectReplication) Validate() error {
680 if e.IsEmpty() {
681 return nil
682 }
683 if e.Status != Disabled && e.Status != Enabled {
684 return fmt.Errorf("invalid ExistingObjectReplication status")
685 }
686 return nil
687}
688
689// TargetMetrics represents inline replication metrics
690// such as pending, failed and completed bytes in total for a bucket remote target
691type TargetMetrics struct {
692 // Completed count
693 ReplicatedCount uint64 `json:"replicationCount,omitempty"`
694 // Completed size in bytes
695 ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
696 // Bandwidth limit in bytes/sec for this target
697 BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
698 // Current bandwidth used in bytes/sec for this target
699 CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
700 // errors seen in replication in last minute, hour and total
701 Failed TimedErrStats `json:"failed,omitempty"`
702 // Deprecated fields
703 // Pending size in bytes
704 PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
705 // Total Replica size in bytes
706 ReplicaSize uint64 `json:"replicaSize,omitempty"`
707 // Failed size in bytes
708 FailedSize uint64 `json:"failedReplicationSize,omitempty"`
709 // Total number of pending operations including metadata updates
710 PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
711 // Total number of failed operations including metadata updates
712 FailedCount uint64 `json:"failedReplicationCount,omitempty"`
713}
714
715// Metrics represents inline replication metrics for a bucket.
716type Metrics struct {
717 Stats map[string]TargetMetrics
718 // Completed size in bytes across targets
719 ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
720 // Total Replica size in bytes across targets
721 ReplicaSize uint64 `json:"replicaSize,omitempty"`
722 // Total Replica counts
723 ReplicaCount int64 `json:"replicaCount,omitempty"`
724 // Total Replicated count
725 ReplicatedCount int64 `json:"replicationCount,omitempty"`
726 // errors seen in replication in last minute, hour and total
727 Errors TimedErrStats `json:"failed,omitempty"`
728 // Total number of entries that are queued for replication
729 QStats InQueueMetric `json:"queued"`
730 // Deprecated fields
731 // Total Pending size in bytes across targets
732 PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
733 // Failed size in bytes across targets
734 FailedSize uint64 `json:"failedReplicationSize,omitempty"`
735 // Total number of pending operations including metadata updates across targets
736 PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
737 // Total number of failed operations including metadata updates across targets
738 FailedCount uint64 `json:"failedReplicationCount,omitempty"`
739}
740
741// RStat - has count and bytes for replication metrics
742type RStat struct {
743 Count float64 `json:"count"`
744 Bytes int64 `json:"bytes"`
745}
746
747// Add two RStat
748func (r RStat) Add(r1 RStat) RStat {
749 return RStat{
750 Count: r.Count + r1.Count,
751 Bytes: r.Bytes + r1.Bytes,
752 }
753}
754
755// TimedErrStats holds error stats for a time period
756type TimedErrStats struct {
757 LastMinute RStat `json:"lastMinute"`
758 LastHour RStat `json:"lastHour"`
759 Totals RStat `json:"totals"`
760}
761
762// Add two TimedErrStats
763func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
764 return TimedErrStats{
765 LastMinute: te.LastMinute.Add(o.LastMinute),
766 LastHour: te.LastHour.Add(o.LastHour),
767 Totals: te.Totals.Add(o.Totals),
768 }
769}
770
771// ResyncTargetsInfo provides replication target information to resync replicated data.
772type ResyncTargetsInfo struct {
773 Targets []ResyncTarget `json:"target,omitempty"`
774}
775
776// ResyncTarget provides the replica resources and resetID to initiate resync replication.
777type ResyncTarget struct {
778 Arn string `json:"arn"`
779 ResetID string `json:"resetid"`
780 StartTime time.Time `json:"startTime,omitempty"`
781 EndTime time.Time `json:"endTime,omitempty"`
782 // Status of resync operation
783 ResyncStatus string `json:"resyncStatus,omitempty"`
784 // Completed size in bytes
785 ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
786 // Failed size in bytes
787 FailedSize int64 `json:"failedReplicationSize,omitempty"`
788 // Total number of failed operations
789 FailedCount int64 `json:"failedReplicationCount,omitempty"`
790 // Total number of completed operations
791 ReplicatedCount int64 `json:"replicationCount,omitempty"`
792 // Last bucket/object replicated.
793 Bucket string `json:"bucket,omitempty"`
794 Object string `json:"object,omitempty"`
795}
796
797// XferStats holds transfer rate info for uploads/sec
798type XferStats struct {
799 AvgRate float64 `json:"avgRate"`
800 PeakRate float64 `json:"peakRate"`
801 CurrRate float64 `json:"currRate"`
802}
803
804// Merge two XferStats
805func (x *XferStats) Merge(x1 XferStats) {
806 x.AvgRate += x1.AvgRate
807 x.PeakRate += x1.PeakRate
808 x.CurrRate += x1.CurrRate
809}
810
811// QStat holds count and bytes for objects in replication queue
812type QStat struct {
813 Count float64 `json:"count"`
814 Bytes float64 `json:"bytes"`
815}
816
817// Add 2 QStat entries
818func (q *QStat) Add(q1 QStat) {
819 q.Count += q1.Count
820 q.Bytes += q1.Bytes
821}
822
823// InQueueMetric holds stats for objects in replication queue
824type InQueueMetric struct {
825 Curr QStat `json:"curr" msg:"cq"`
826 Avg QStat `json:"avg" msg:"aq"`
827 Max QStat `json:"peak" msg:"pq"`
828}
829
830// MetricName name of replication metric
831type MetricName string
832
833const (
834 // Large is a metric name for large objects >=128MiB
835 Large MetricName = "Large"
836 // Small is a metric name for objects <128MiB size
837 Small MetricName = "Small"
838 // Total is a metric name for total objects
839 Total MetricName = "Total"
840)
841
842// WorkerStat has stats on number of replication workers
843type WorkerStat struct {
844 Curr int32 `json:"curr"`
845 Avg float32 `json:"avg"`
846 Max int32 `json:"max"`
847}
848
849// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
850// and number of entries that failed replication after 3 retries
851type ReplMRFStats struct {
852 LastFailedCount uint64 `json:"failedCount_last5min"`
853 // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
854 TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
855 // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
856 TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
857}
858
859// ReplQNodeStats holds stats for a node in replication queue
860type ReplQNodeStats struct {
861 NodeName string `json:"nodeName"`
862 Uptime int64 `json:"uptime"`
863 Workers WorkerStat `json:"activeWorkers"`
864
865 XferStats map[MetricName]XferStats `json:"transferSummary"`
866 TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
867
868 QStats InQueueMetric `json:"queueStats"`
869 MRFStats ReplMRFStats `json:"mrfStats"`
870}
871
872// ReplQueueStats holds stats for replication queue across nodes
873type ReplQueueStats struct {
874 Nodes []ReplQNodeStats `json:"nodes"`
875}
876
877// Workers returns worker stats aggregated across all nodes: Avg and Curr are averaged, Max is the peak.
878func (q ReplQueueStats) Workers() (tot WorkerStat) {
879 for _, node := range q.Nodes {
880 tot.Avg += node.Workers.Avg
881 tot.Curr += node.Workers.Curr
882 if tot.Max < node.Workers.Max {
883 tot.Max = node.Workers.Max
884 }
885 }
886 if len(q.Nodes) > 0 {
887 tot.Avg /= float32(len(q.Nodes))
888 tot.Curr /= int32(len(q.Nodes))
889 }
890 return tot
891}
892
893// qStatSummary returns cluster level stats for objects in replication queue
894func (q ReplQueueStats) qStatSummary() InQueueMetric {
895 m := InQueueMetric{}
896 for _, v := range q.Nodes {
897 m.Avg.Add(v.QStats.Avg)
898 m.Curr.Add(v.QStats.Curr)
899 if m.Max.Count < v.QStats.Max.Count {
900 m.Max.Add(v.QStats.Max)
901 }
902 }
903 return m
904}
905
906// ReplQStats holds stats for objects in replication queue
907type ReplQStats struct {
908 Uptime int64 `json:"uptime"`
909 Workers WorkerStat `json:"workers"`
910
911 XferStats map[MetricName]XferStats `json:"xferStats"`
912 TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
913
914 QStats InQueueMetric `json:"qStats"`
915 MRFStats ReplMRFStats `json:"mrfStats"`
916}
917
918// QStats returns cluster level stats for objects in replication queue
919func (q ReplQueueStats) QStats() (r ReplQStats) {
920 r.QStats = q.qStatSummary()
921 r.XferStats = make(map[MetricName]XferStats)
922 r.TgtXferStats = make(map[string]map[MetricName]XferStats)
923 r.Workers = q.Workers()
924
925 for _, node := range q.Nodes {
926 for arn := range node.TgtXferStats {
927 xmap, ok := node.TgtXferStats[arn]
928 if !ok {
929 xmap = make(map[MetricName]XferStats)
930 }
931 for m, v := range xmap {
932 st, ok := r.XferStats[m]
933 if !ok {
934 st = XferStats{}
935 }
936 st.AvgRate += v.AvgRate
937 st.CurrRate += v.CurrRate
938 st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
939 if _, ok := r.TgtXferStats[arn]; !ok {
940 r.TgtXferStats[arn] = make(map[MetricName]XferStats)
941 }
942 r.TgtXferStats[arn][m] = st
943 }
944 }
945 for k, v := range node.XferStats {
946 st, ok := r.XferStats[k]
947 if !ok {
948 st = XferStats{}
949 }
950 st.AvgRate += v.AvgRate
951 st.CurrRate += v.CurrRate
952 st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
953 r.XferStats[k] = st
954 }
955 r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
956 r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
957 r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
958 r.Uptime += node.Uptime
959 }
960 if len(q.Nodes) > 0 {
961 r.Uptime /= int64(len(q.Nodes)) // average uptime
962 }
963 return
964}
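
The aggregation above sums transfer rates, keeps the peak, and averages uptime across nodes; a small sketch with invented numbers:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	q := replication.ReplQueueStats{Nodes: []replication.ReplQNodeStats{
		{Uptime: 100, XferStats: map[replication.MetricName]replication.XferStats{
			replication.Total: {AvgRate: 5, PeakRate: 9, CurrRate: 4},
		}},
		{Uptime: 300, XferStats: map[replication.MetricName]replication.XferStats{
			replication.Total: {AvgRate: 7, PeakRate: 20, CurrRate: 6},
		}},
	}}
	r := q.QStats()
	fmt.Println(r.Uptime)                       // 200: uptime is averaged
	fmt.Println(r.XferStats[replication.Total]) // {12 20 10}: rates summed, peak kept
}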
965
966// MetricsV2 represents replication metrics for a bucket.
967type MetricsV2 struct {
968 Uptime int64 `json:"uptime"`
969 CurrentStats Metrics `json:"currStats"`
970 QueueStats ReplQueueStats `json:"queueStats"`
971}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 0000000..056e78a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,411 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package s3utils
19
20import (
21 "bytes"
22 "encoding/hex"
23 "errors"
24 "net"
25 "net/url"
26 "regexp"
27 "sort"
28 "strings"
29 "unicode/utf8"
30)
31
32// sentinelURL is the default URL value, which is invalid.
33var sentinelURL = url.URL{}
34
35// IsValidDomain validates if input string is a valid domain name.
36func IsValidDomain(host string) bool {
37 // See RFC 1035, RFC 3696.
38 host = strings.TrimSpace(host)
39 if len(host) == 0 || len(host) > 255 {
40 return false
41 }
42 // host cannot start or end with "-"
43 if host[len(host)-1:] == "-" || host[:1] == "-" {
44 return false
45 }
46 // host cannot start or end with "_"
47 if host[len(host)-1:] == "_" || host[:1] == "_" {
48 return false
49 }
50 // host cannot start with a "."
51 if host[:1] == "." {
52 return false
53 }
54	// Reject known-invalid special characters.
55 if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
56 return false
57 }
58	// No need to regexp match, since the list is non-exhaustive.
59	// We let the name pass here and fail later if invalid.
60 return true
61}
62
63// IsValidIP parses input string for ip address validity.
64func IsValidIP(ip string) bool {
65 return net.ParseIP(ip) != nil
66}
67
68// IsVirtualHostSupported - verifies if bucketName can be part of
69// virtual host. Currently Amazon S3, Google Cloud Storage and Aliyun
70// OSS support this.
71func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
72 if endpointURL == sentinelURL {
73 return false
74 }
75 // bucketName can be valid but '.' in the hostname will fail SSL
76 // certificate validation. So do not use host-style for such buckets.
77 if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
78 return false
79 }
80 // Return true for all other cases
81 return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
82}
83
84// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
85
86// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
87var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
88
89// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
90var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
91
92// amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host.
93var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`)
94
95// amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack.
96var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`)
97
98// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
99var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
100
101// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
102var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
103
104// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
105var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)
106
107// Regular expression used to determine if the arg is elb host.
108var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
109
110// Regular expression used to determine if the arg is elb host in china.
111var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
112
113// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style
114var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
115
116// GetRegionFromURL - returns a region from url host.
117func GetRegionFromURL(endpointURL url.URL) string {
118 if endpointURL == sentinelURL {
119 return ""
120 }
121 if endpointURL.Host == "s3-external-1.amazonaws.com" {
122 return ""
123 }
124
125 // if elb's are used we cannot calculate which region it may be, just return empty.
126 if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
127 return ""
128 }
129
130 // We check for FIPS dualstack matching first to avoid the non-greedy
131 // regex for FIPS non-dualstack matching a dualstack URL
132 parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
133 if len(parts) > 1 {
134 return parts[1]
135 }
136
137 parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
138 if len(parts) > 1 {
139 return parts[1]
140 }
141
142 parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
143 if len(parts) > 1 {
144 return parts[1]
145 }
146
147 parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
148 if len(parts) > 1 {
149 return parts[1]
150 }
151
152 parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
153 if len(parts) > 1 {
154 return parts[1]
155 }
156
157 parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
158 if len(parts) > 1 {
159 return parts[1]
160 }
161
162 parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
163 if len(parts) > 1 {
164 return parts[1]
165 }
166
167 parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
168 if len(parts) > 1 {
169 return parts[1]
170 }
171
172 return ""
173}
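
A sketch of the matching order on a few representative hosts (the last one is an arbitrary non-Amazon endpoint):

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	for _, host := range []string{
		"s3.eu-west-1.amazonaws.com",                // dot style
		"s3-fips.dualstack.us-east-2.amazonaws.com", // FIPS dualstack, checked first
		"s3.cn-north-1.amazonaws.com.cn",            // China region
		"play.min.io",                               // not Amazon: empty region
	} {
		u := url.URL{Scheme: "https", Host: host}
		fmt.Printf("%-45s -> %q\n", host, s3utils.GetRegionFromURL(u))
	}
}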
174
175// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
176func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
177 return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
178}
179
180// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
181func IsAmazonEndpoint(endpointURL url.URL) bool {
182 if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
183 return true
184 }
185 return GetRegionFromURL(endpointURL) != ""
186}
187
188// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
189func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
190 if endpointURL == sentinelURL {
191 return false
192 }
193 return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
194 endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
195 IsAmazonFIPSGovCloudEndpoint(endpointURL))
196}
197
198// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
199func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
200 if endpointURL == sentinelURL {
201 return false
202 }
203 return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
204}
205
206// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
207// See https://aws.amazon.com/compliance/fips.
208func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
209 if endpointURL == sentinelURL {
210 return false
211 }
212 return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
213}
214
215// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
216// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
217func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
218 if endpointURL == sentinelURL {
219 return false
220 }
221 return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
222}
223
224// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
225func IsGoogleEndpoint(endpointURL url.URL) bool {
226 if endpointURL == sentinelURL {
227 return false
228 }
229 return endpointURL.Host == "storage.googleapis.com"
230}
231
232// Expects ASCII-encoded strings - from the output of EncodePath
233func percentEncodeSlash(s string) string {
234 return strings.ReplaceAll(s, "/", "%2F")
235}
236
237// QueryEncode - encodes query values in their URL encoded form. In
238// addition to the percent encoding performed by EncodePath() used
239// here, it also percent encodes '/' (forward slash)
240func QueryEncode(v url.Values) string {
241 if v == nil {
242 return ""
243 }
244 var buf bytes.Buffer
245 keys := make([]string, 0, len(v))
246 for k := range v {
247 keys = append(keys, k)
248 }
249 sort.Strings(keys)
250 for _, k := range keys {
251 vs := v[k]
252 prefix := percentEncodeSlash(EncodePath(k)) + "="
253 for _, v := range vs {
254 if buf.Len() > 0 {
255 buf.WriteByte('&')
256 }
257 buf.WriteString(prefix)
258 buf.WriteString(percentEncodeSlash(EncodePath(v)))
259 }
260 }
261 return buf.String()
262}
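
For instance (hypothetical query values), keys come out sorted and '/' is percent-encoded, unlike the standard library's url.Values.Encode:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	v := url.Values{}
	v.Set("prefix", "photos/2023")
	v.Set("delimiter", "/")
	fmt.Println(s3utils.QueryEncode(v)) // delimiter=%2F&prefix=photos%2F2023
}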
263
264// TagDecode - decodes canonical tag into map of key and value.
265func TagDecode(ctag string) map[string]string {
266 if ctag == "" {
267 return map[string]string{}
268 }
269 tags := strings.Split(ctag, "&")
270 tagMap := make(map[string]string, len(tags))
271 var err error
272 for _, tag := range tags {
273 kvs := strings.SplitN(tag, "=", 2)
274 if len(kvs) == 0 {
275 return map[string]string{}
276 }
277 if len(kvs) == 1 {
278 return map[string]string{}
279 }
280 tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
281 if err != nil {
282 continue
283 }
284 }
285 return tagMap
286}
287
288// TagEncode - encodes tag values in their URL encoded form. In
289// addition to the percent encoding performed by EncodePath() used
290// here, it also percent encodes '/' (forward slash)
291func TagEncode(tags map[string]string) string {
292 if tags == nil {
293 return ""
294 }
295 values := url.Values{}
296 for k, v := range tags {
297 values[k] = []string{v}
298 }
299 return QueryEncode(values)
300}
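
TagEncode and TagDecode round-trip a tag map through the canonical form; a sketch with invented tags:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	tags := map[string]string{"project": "alpha", "owner": "ops"}
	ctag := s3utils.TagEncode(tags)
	fmt.Println(ctag)                    // owner=ops&project=alpha
	fmt.Println(s3utils.TagDecode(ctag)) // map[owner:ops project:alpha]
}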
301
302// If the object name matches this reserved pattern, it needs no encoding.
303var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
304
305// EncodePath encodes a string from its UTF-8 byte representation to percent-encoded escape sequences.
306//
307// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
308// non-English characters cannot be parsed due to the way url.Encode() is written.
309//
310// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
311// pretty much every UTF-8 character.
312func EncodePath(pathName string) string {
313 if reservedObjectNames.MatchString(pathName) {
314 return pathName
315 }
316 var encodedPathname strings.Builder
317 for _, s := range pathName {
318 if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
319 encodedPathname.WriteRune(s)
320 continue
321 }
322 switch s {
323 case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
324 encodedPathname.WriteRune(s)
325 continue
326 default:
327 l := utf8.RuneLen(s)
328 if l < 0 {
329				// If the rune is not valid UTF-8, return the input string unmodified.
330 return pathName
331 }
332 u := make([]byte, l)
333 utf8.EncodeRune(u, s)
334 for _, r := range u {
335 hex := hex.EncodeToString([]byte{r})
336 encodedPathname.WriteString("%" + strings.ToUpper(hex))
337 }
338 }
339 }
340 return encodedPathname.String()
341}
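
A sketch of the behavior on a hypothetical object key: unreserved characters and '/' pass through, everything else is percent-encoded byte by byte, including multi-byte UTF-8 runes:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	fmt.Println(s3utils.EncodePath("photos/2023/réunion été.jpg"))
	// photos/2023/r%C3%A9union%20%C3%A9t%C3%A9.jpg
}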
342
343// We support '.' in bucket names, but we fall back to path-style
344// requests instead for such buckets.
345var (
346 validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
347 validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
348 ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
349)
350
351// Common checker for both stricter and basic validation.
352func checkBucketNameCommon(bucketName string, strict bool) (err error) {
353 if strings.TrimSpace(bucketName) == "" {
354 return errors.New("Bucket name cannot be empty")
355 }
356 if len(bucketName) < 3 {
357 return errors.New("Bucket name cannot be shorter than 3 characters")
358 }
359 if len(bucketName) > 63 {
360 return errors.New("Bucket name cannot be longer than 63 characters")
361 }
362 if ipAddress.MatchString(bucketName) {
363 return errors.New("Bucket name cannot be an ip address")
364 }
365 if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
366 return errors.New("Bucket name contains invalid characters")
367 }
368 if strict {
369 if !validBucketNameStrict.MatchString(bucketName) {
370 err = errors.New("Bucket name contains invalid characters")
371 }
372 return err
373 }
374 if !validBucketName.MatchString(bucketName) {
375 err = errors.New("Bucket name contains invalid characters")
376 }
377 return err
378}
379
380// CheckValidBucketName - checks if we have a valid input bucket name.
381func CheckValidBucketName(bucketName string) (err error) {
382 return checkBucketNameCommon(bucketName, false)
383}
384
385// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
386// This is a stricter version.
387// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
388func CheckValidBucketNameStrict(bucketName string) (err error) {
389 return checkBucketNameCommon(bucketName, true)
390}
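
The relaxed and strict checkers differ mainly on case and permitted punctuation; a short sketch with invented names:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	fmt.Println(s3utils.CheckValidBucketName("My_Bucket"))       // <nil>: relaxed rules allow this
	fmt.Println(s3utils.CheckValidBucketNameStrict("My_Bucket")) // error: uppercase and '_' rejected
	fmt.Println(s3utils.CheckValidBucketNameStrict("my-bucket")) // <nil>
}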
391
392// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
393// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
394func CheckValidObjectNamePrefix(objectName string) error {
395 if len(objectName) > 1024 {
396 return errors.New("Object name cannot be longer than 1024 characters")
397 }
398 if !utf8.ValidString(objectName) {
399 return errors.New("Object name with non UTF-8 strings are not supported")
400 }
401 return nil
402}
403
404// CheckValidObjectName - checks if we have a valid input object name.
405// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
406func CheckValidObjectName(objectName string) error {
407 if strings.TrimSpace(objectName) == "" {
408 return errors.New("Object name cannot be empty")
409 }
410 return CheckValidObjectNamePrefix(objectName)
411}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
new file mode 100644
index 0000000..c35e58e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -0,0 +1,200 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package set
19
20import (
21 "fmt"
22 "sort"
23
24 jsoniter "github.com/json-iterator/go"
25)
26
27// StringSet - uses map as set of strings.
28type StringSet map[string]struct{}
29
30var json = jsoniter.ConfigCompatibleWithStandardLibrary
31
32// ToSlice - returns StringSet as string slice.
33func (set StringSet) ToSlice() []string {
34 keys := make([]string, 0, len(set))
35 for k := range set {
36 keys = append(keys, k)
37 }
38 sort.Strings(keys)
39 return keys
40}
41
42// IsEmpty - returns whether the set is empty or not.
43func (set StringSet) IsEmpty() bool {
44 return len(set) == 0
45}
46
47// Add - adds string to the set.
48func (set StringSet) Add(s string) {
49 set[s] = struct{}{}
50}
51
52// Remove - removes string in the set. It does nothing if string does not exist in the set.
53func (set StringSet) Remove(s string) {
54 delete(set, s)
55}
56
57// Contains - checks if string is in the set.
58func (set StringSet) Contains(s string) bool {
59 _, ok := set[s]
60 return ok
61}
62
63// FuncMatch - returns a new set containing each value that passes the match function.
64// A 'matchFn' should accept an element of the set as its first argument and
65// 'matchString' as its second argument. The function can apply any logic to
66// compare the two arguments; it should return true to include the element in
67// the output set, otherwise the element is ignored.
68func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
69 nset := NewStringSet()
70 for k := range set {
71 if matchFn(k, matchString) {
72 nset.Add(k)
73 }
74 }
75 return nset
76}
77
78// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
79// An 'applyFn' should accept an element of the set as its argument and return
80// a processed string. The function can apply any logic to produce the
81// processed string.
82func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
83 nset := NewStringSet()
84 for k := range set {
85 nset.Add(applyFn(k))
86 }
87 return nset
88}
89
90// Equals - checks whether given set is equal to current set or not.
91func (set StringSet) Equals(sset StringSet) bool {
92 // If length of set is not equal to length of given set, the
93 // set is not equal to given set.
94 if len(set) != len(sset) {
95 return false
96 }
97
98	// As both sets are equal in length, check that each element is present.
99 for k := range set {
100 if _, ok := sset[k]; !ok {
101 return false
102 }
103 }
104
105 return true
106}
107
108// Intersection - returns the intersection with given set as new set.
109func (set StringSet) Intersection(sset StringSet) StringSet {
110 nset := NewStringSet()
111 for k := range set {
112 if _, ok := sset[k]; ok {
113 nset.Add(k)
114 }
115 }
116
117 return nset
118}
119
120// Difference - returns the difference with given set as new set.
121func (set StringSet) Difference(sset StringSet) StringSet {
122 nset := NewStringSet()
123 for k := range set {
124 if _, ok := sset[k]; !ok {
125 nset.Add(k)
126 }
127 }
128
129 return nset
130}
131
132// Union - returns the union with given set as new set.
133func (set StringSet) Union(sset StringSet) StringSet {
134 nset := NewStringSet()
135 for k := range set {
136 nset.Add(k)
137 }
138
139 for k := range sset {
140 nset.Add(k)
141 }
142
143 return nset
144}
145
146// MarshalJSON - converts to JSON data.
147func (set StringSet) MarshalJSON() ([]byte, error) {
148 return json.Marshal(set.ToSlice())
149}
150
151// UnmarshalJSON - parses JSON data and creates new set with it.
152// If 'data' contains JSON string array, the set contains each string.
153// If 'data' contains JSON string, the set contains the string as one element.
154// If 'data' contains any other JSON type, a JSON parse error is returned.
155func (set *StringSet) UnmarshalJSON(data []byte) error {
156 sl := []string{}
157 var err error
158 if err = json.Unmarshal(data, &sl); err == nil {
159 *set = make(StringSet)
160 for _, s := range sl {
161 set.Add(s)
162 }
163 } else {
164 var s string
165 if err = json.Unmarshal(data, &s); err == nil {
166 *set = make(StringSet)
167 set.Add(s)
168 }
169 }
170
171 return err
172}
173
174// String - returns printable string of the set.
175func (set StringSet) String() string {
176 return fmt.Sprintf("%s", set.ToSlice())
177}
178
179// NewStringSet - creates new string set.
180func NewStringSet() StringSet {
181 return make(StringSet)
182}
183
184// CreateStringSet - creates new string set with given string values.
185func CreateStringSet(sl ...string) StringSet {
186 set := make(StringSet)
187 for _, k := range sl {
188 set.Add(k)
189 }
190 return set
191}
192
193// CopyStringSet - returns copy of given set.
194func CopyStringSet(set StringSet) StringSet {
195 nset := NewStringSet()
196 for k, v := range set {
197 nset[k] = v
198 }
199 return nset
200}
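
A short usage sketch of the set operations above (element names invented):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	a := set.CreateStringSet("read", "write", "delete")
	b := set.CreateStringSet("write", "list")

	fmt.Println(a.Intersection(b)) // [write]
	fmt.Println(a.Difference(b))   // [delete read]
	fmt.Println(a.Union(b))        // [delete list read write]
}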
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
new file mode 100644
index 0000000..77540e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -0,0 +1,224 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "fmt"
23 "io"
24 "net/http"
25 "strconv"
26 "strings"
27 "time"
28)
29
30// getUnsignedChunkLength - calculates the total length of a chunk (size header + data + CRLFs)
31func getUnsignedChunkLength(chunkDataSize int64) int64 {
32 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
33 crlfLen +
34 chunkDataSize +
35 crlfLen
36}
37
38// getUSStreamLength - calculates the length of the overall stream (data + metadata)
39func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
40 if dataLen <= 0 {
41 return 0
42 }
43
44 chunksCount := int64(dataLen / chunkSize)
45 remainingBytes := int64(dataLen % chunkSize)
46 streamLen := int64(0)
47 streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
48 if remainingBytes > 0 {
49 streamLen += getUnsignedChunkLength(remainingBytes)
50 }
51 streamLen += getUnsignedChunkLength(0)
52 if len(trailers) > 0 {
53 for name, placeholder := range trailers {
54 if len(placeholder) > 0 {
55 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
56 }
57 }
58 streamLen += crlfLen
59 }
60
61 return streamLen
62}
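
A standalone sketch of the arithmetic for a hypothetical 150,000-byte upload with no trailers (chunkLen mirrors getUnsignedChunkLength; it is not the vendored function):

package main

import "fmt"

// chunkLen mirrors getUnsignedChunkLength for illustration: hex digits of
// the chunk size, CRLF, the chunk data itself, CRLF.
func chunkLen(n int64) int64 {
	return int64(len(fmt.Sprintf("%x", n))) + 2 + n + 2
}

func main() {
	const chunkSize = 64 * 1024 // payloadChunkSize
	dataLen := int64(150_000)   // hypothetical upload size

	full := dataLen / chunkSize // 2 full chunks
	rest := dataLen % chunkSize // 18928 trailing bytes
	total := full*chunkLen(chunkSize) + chunkLen(rest) + chunkLen(0)
	fmt.Println(total) // 150031: advertised as the request Content-Length
}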
63
64// prepareStreamingRequest - prepares a request with appropriate
65// headers before computing the seed signature.
66func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
67 req.TransferEncoding = []string{"aws-chunked"}
68 if sessionToken != "" {
69 req.Header.Set("X-Amz-Security-Token", sessionToken)
70 }
71
72 req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
73 // Set content length with streaming signature for each chunk included.
74 req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
75}
76
77// StreamingUSReader implements unsigned chunked upload as a reader on
78// top of req.Body's ReadCloser: chunk header;data;... repeated
79type StreamingUSReader struct {
80 contentLen int64 // Content-Length from req header
81 baseReadCloser io.ReadCloser // underlying io.Reader
82 bytesRead int64 // bytes read from underlying io.Reader
83 buf bytes.Buffer // holds signed chunk
84 chunkBuf []byte // holds raw data read from req Body
85 chunkBufLen int // no. of bytes read so far into chunkBuf
86 done bool // done reading the underlying reader to EOF
87 chunkNum int
88 totalChunks int
89 lastChunkSize int
90 trailer http.Header
91}
92
93// writeChunk - writes a chunk of chunkLen size, read from s.baseReader, into the output buffer.
94func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
95 s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
96
97 // Write chunk data into streaming buffer
98 s.buf.Write(s.chunkBuf[:chunkLen])
99
100 // Write the chunk trailer.
101 if addCrLf {
102 s.buf.Write([]byte("\r\n"))
103 }
104
105 // Reset chunkBufLen for next chunk read.
106 s.chunkBufLen = 0
107 s.chunkNum++
108}
109
110// addTrailer - adds a trailer with the provided headers
111// as the final chunk of the output.
112func (s *StreamingUSReader) addTrailer(h http.Header) {
113 olen := len(s.chunkBuf)
114 s.chunkBuf = s.chunkBuf[:0]
115 for k, v := range h {
116 s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
117 }
118
119 s.buf.Write(s.chunkBuf)
120 s.buf.WriteString("\r\n\r\n")
121
122 // Reset chunkBufLen for next chunk read.
123 s.chunkBuf = s.chunkBuf[:olen]
124 s.chunkBufLen = 0
125 s.chunkNum++
126}
127
128// StreamingUnsignedV4 - provides chunked upload
129func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
130 // Set headers needed for streaming signature.
131 prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
132
133 if req.Body == nil {
134 req.Body = io.NopCloser(bytes.NewReader([]byte("")))
135 }
136
137 stReader := &StreamingUSReader{
138 baseReadCloser: req.Body,
139 chunkBuf: make([]byte, payloadChunkSize),
140 contentLen: dataLen,
141 chunkNum: 1,
142 totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
143 lastChunkSize: int(dataLen % payloadChunkSize),
144 }
145 if len(req.Trailer) > 0 {
146 stReader.trailer = req.Trailer
147 // Remove...
148 req.Trailer = nil
149 }
150
151 req.Body = stReader
152
153 return req
154}
155
156// Read - this method streams the chunked payload, providing an
157// io.Reader interface.
158func (s *StreamingUSReader) Read(buf []byte) (int, error) {
159 switch {
160 // After the last chunk is read from underlying reader, we
161 // never re-fill s.buf.
162 case s.done:
163
164	// s.buf will be (re-)filled with the next chunk when it has fewer
165	// bytes than asked for.
166 case s.buf.Len() < len(buf):
167 s.chunkBufLen = 0
168 for {
169 n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
170 // Usually we validate `err` first, but in this case
171 // we are validating n > 0 for the following reasons.
172 //
173 // 1. n > 0, err is one of io.EOF, nil (near end of stream)
174 // A Reader returning a non-zero number of bytes at the end
175 // of the input stream may return either err == EOF or err == nil
176 //
177 // 2. n == 0, err is io.EOF (actual end of stream)
178 //
179 // Callers should always process the n > 0 bytes returned
180 // before considering the error err.
181 if n1 > 0 {
182 s.chunkBufLen += n1
183 s.bytesRead += int64(n1)
184
185 if s.chunkBufLen == payloadChunkSize ||
186 (s.chunkNum == s.totalChunks-1 &&
187 s.chunkBufLen == s.lastChunkSize) {
188 // Sign the chunk and write it to s.buf.
189 s.writeChunk(s.chunkBufLen, true)
190 break
191 }
192 }
193 if err != nil {
194 if err == io.EOF {
195 // No more data left in baseReader - last chunk.
196 // Done reading the last chunk from baseReader.
197 s.done = true
198
199 // bytes read from baseReader different than
200 // content length provided.
201 if s.bytesRead != s.contentLen {
202 return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
203 }
204
205 // Sign the chunk and write it to s.buf.
206 s.writeChunk(0, len(s.trailer) == 0)
207 if len(s.trailer) > 0 {
208 // Trailer must be set now.
209 s.addTrailer(s.trailer)
210 }
211 break
212 }
213 return 0, err
214 }
215
216 }
217 }
218 return s.buf.Read(buf)
219}
220
221// Close - this method makes underlying io.ReadCloser's Close method available.
222func (s *StreamingUSReader) Close() error {
223 return s.baseReadCloser.Close()
224}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
new file mode 100644
index 0000000..1c2f1dc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -0,0 +1,403 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "fmt"
24 "io"
25 "net/http"
26 "strconv"
27 "strings"
28 "time"
29
30 md5simd "github.com/minio/md5-simd"
31)
32
33// Reference for constants used below -
34// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
35const (
36 streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
37 streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
38 streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
39 streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER"
40 emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
41 payloadChunkSize = 64 * 1024
42 chunkSigConstLen = 17 // ";chunk-signature="
43 signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
44 crlfLen = 2 // CRLF
45 trailerKVSeparator = ":"
46 trailerSignature = "x-amz-trailer-signature"
47)
48
49// Request headers to be ignored while calculating seed signature for
50// a request.
51var ignoredStreamingHeaders = map[string]bool{
52 "Authorization": true,
53 "User-Agent": true,
54 "Content-Type": true,
55}
56
57// getSignedChunkLength - calculates the total length of a signed chunk (metadata + data)
58func getSignedChunkLength(chunkDataSize int64) int64 {
59 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
60 chunkSigConstLen +
61 signatureStrLen +
62 crlfLen +
63 chunkDataSize +
64 crlfLen
65}
66
67// getStreamLength - calculates the length of the overall stream (data + metadata)
68func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
69 if dataLen <= 0 {
70 return 0
71 }
72
73 chunksCount := int64(dataLen / chunkSize)
74 remainingBytes := int64(dataLen % chunkSize)
75 streamLen := int64(0)
76 streamLen += chunksCount * getSignedChunkLength(chunkSize)
77 if remainingBytes > 0 {
78 streamLen += getSignedChunkLength(remainingBytes)
79 }
80 streamLen += getSignedChunkLength(0)
81 if len(trailers) > 0 {
82 for name, placeholder := range trailers {
83 if len(placeholder) > 0 {
84 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
85 }
86 }
87 streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
88 }
89
90 return streamLen
91}
92
93// buildChunkStringToSign - returns the string to sign given chunk data
94// and previous signature.
95func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
96 stringToSignParts := []string{
97 streamingPayloadHdr,
98 t.Format(iso8601DateFormat),
99 getScope(region, t, ServiceTypeS3),
100 previousSig,
101 emptySHA256,
102 chunkChecksum,
103 }
104
105 return strings.Join(stringToSignParts, "\n")
106}
107
108// buildTrailerChunkStringToSign - returns the string to sign given chunk data
109// and previous signature.
110func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
111 stringToSignParts := []string{
112 streamingTrailerHdr,
113 t.Format(iso8601DateFormat),
114 getScope(region, t, ServiceTypeS3),
115 previousSig,
116 chunkChecksum,
117 }
118
119 return strings.Join(stringToSignParts, "\n")
120}
121
122// prepareStreamingRequest - prepares a request with appropriate
123// headers before computing the seed signature.
124func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
125 // Set x-amz-content-sha256 header.
126 if len(req.Trailer) == 0 {
127 req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
128 } else {
129 req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
130 for k := range req.Trailer {
131 req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
132 }
133 req.TransferEncoding = []string{"aws-chunked"}
134 }
135
136 if sessionToken != "" {
137 req.Header.Set("X-Amz-Security-Token", sessionToken)
138 }
139
140 req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
141 // Set content length with streaming signature for each chunk included.
142 req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
143 req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
144}
145
146// buildChunkHeader - returns the chunk header.
147// e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
148func buildChunkHeader(chunkLen int64, signature string) []byte {
149 return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
150}
151
152// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
153func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
154 previousSignature, secretAccessKey string,
155) string {
156 chunkStringToSign := buildChunkStringToSign(reqTime, region,
157 previousSignature, chunkCheckSum)
158 signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
159 return getSignature(signingKey, chunkStringToSign)
160}
161
162// buildTrailerChunkSignature - returns the trailer chunk signature for a given chunk and previous signature.
163func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
164 previousSignature, secretAccessKey string,
165) string {
166 chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
167 previousSignature, chunkChecksum)
168 signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
169 return getSignature(signingKey, chunkStringToSign)
170}
171
172// setSeedSignature - computes and sets the seed signature for a given request.
173func (s *StreamingReader) setSeedSignature(req *http.Request) {
174 // Get canonical request
175 canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))
176
177 // Get string to sign from canonical request.
178 stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)
179
180 signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)
181
182 // Calculate signature.
183 s.seedSignature = getSignature(signingKey, stringToSign)
184}
185
186// StreamingReader implements chunked upload signing as a reader on
187// top of req.Body's ReadCloser: chunk header;data;... repeated
188type StreamingReader struct {
189 accessKeyID string
190 secretAccessKey string
191 sessionToken string
192 region string
193 prevSignature string
194 seedSignature string
195 contentLen int64 // Content-Length from req header
196 baseReadCloser io.ReadCloser // underlying io.Reader
197 bytesRead int64 // bytes read from underlying io.Reader
198 buf bytes.Buffer // holds signed chunk
199 chunkBuf []byte // holds raw data read from req Body
200 chunkBufLen int // no. of bytes read so far into chunkBuf
201 done bool // done reading the underlying reader to EOF
202 reqTime time.Time
203 chunkNum int
204 totalChunks int
205 lastChunkSize int
206 trailer http.Header
207 sh256 md5simd.Hasher
208}
209
210// signChunk - signs a chunk read from s.baseReader of chunkLen size.
211func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
212 // Compute chunk signature for next header
213 s.sh256.Reset()
214 s.sh256.Write(s.chunkBuf[:chunkLen])
215	chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
216
217	signature := buildChunkSignature(chunkChecksum, s.reqTime,
218 s.region, s.prevSignature, s.secretAccessKey)
219
220 // For next chunk signature computation
221 s.prevSignature = signature
222
223 // Write chunk header into streaming buffer
224 chunkHdr := buildChunkHeader(int64(chunkLen), signature)
225 s.buf.Write(chunkHdr)
226
227 // Write chunk data into streaming buffer
228 s.buf.Write(s.chunkBuf[:chunkLen])
229
230 // Write the chunk trailer.
231 if addCrLf {
232 s.buf.Write([]byte("\r\n"))
233 }
234
235 // Reset chunkBufLen for next chunk read.
236 s.chunkBufLen = 0
237 s.chunkNum++
238}
239
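A minimal sketch of the aws-chunked framing that signChunk writes into s.buf; the signature here is a placeholder for the 64-hex-character HMAC that buildChunkSignature produces:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	data := []byte("hello world")
	sig := "deadbeef" // placeholder; real chunk signatures are 64 hex characters
	// Frame layout: hex(len) + ";chunk-signature=" + sig + CRLF + data + CRLF.
	hdr := strconv.FormatInt(int64(len(data)), 16) + ";chunk-signature=" + sig + "\r\n"
	fmt.Printf("%s%s\r\n", hdr, data)
	// Prints: b;chunk-signature=deadbeef<CRLF>hello world<CRLF>
}
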
240// addSignedTrailer - adds a trailer with the provided headers,
241// then signs a chunk and adds it to output.
242func (s *StreamingReader) addSignedTrailer(h http.Header) {
243 olen := len(s.chunkBuf)
244 s.chunkBuf = s.chunkBuf[:0]
245 for k, v := range h {
246 s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
247 }
248
249 s.sh256.Reset()
250 s.sh256.Write(s.chunkBuf)
251 chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
252 // Compute chunk signature
253 signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
254 s.region, s.prevSignature, s.secretAccessKey)
255
256 // For next chunk signature computation
257 s.prevSignature = signature
258
259 s.buf.Write(s.chunkBuf)
260 s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")
261
262 // Reset chunkBufLen for next chunk read.
263 s.chunkBuf = s.chunkBuf[:olen]
264 s.chunkBufLen = 0
265 s.chunkNum++
266}
267
268// setStreamingAuthHeader - builds and sets authorization header value
269// for streaming signature.
270func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
271 credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
272 authParts := []string{
273 signV4Algorithm + " Credential=" + credential,
274 "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
275 "Signature=" + s.seedSignature,
276 }
277
278 // Set authorization header.
279 auth := strings.Join(authParts, ",")
280 req.Header.Set("Authorization", auth)
281}
282
283// StreamingSignV4 - provides chunked upload signatureV4 support by
284// implementing io.Reader.
285func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
286 region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
287) *http.Request {
288 // Set headers needed for streaming signature.
289 prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
290
291 if req.Body == nil {
292 req.Body = io.NopCloser(bytes.NewReader([]byte("")))
293 }
294
295 stReader := &StreamingReader{
296 baseReadCloser: req.Body,
297 accessKeyID: accessKeyID,
298 secretAccessKey: secretAccessKey,
299 sessionToken: sessionToken,
300 region: region,
301 reqTime: reqTime,
302 chunkBuf: make([]byte, payloadChunkSize),
303 contentLen: dataLen,
304 chunkNum: 1,
305 totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
306 lastChunkSize: int(dataLen % payloadChunkSize),
307 sh256: sh256,
308 }
309 if len(req.Trailer) > 0 {
310 stReader.trailer = req.Trailer
311		// Remove the trailer: it will be sent as a signed trailing chunk instead.
312 req.Trailer = nil
313 }
314
315 // Add the request headers required for chunk upload signing.
316
317 // Compute the seed signature.
318 stReader.setSeedSignature(req)
319
320 // Set the authorization header with the seed signature.
321 stReader.setStreamingAuthHeader(req)
322
323 // Set seed signature as prevSignature for subsequent
324 // streaming signing process.
325 stReader.prevSignature = stReader.seedSignature
326 req.Body = stReader
327
328 return req
329}
330
331// Read - this method performs chunked upload signing, providing an
332// io.Reader interface.
333func (s *StreamingReader) Read(buf []byte) (int, error) {
334 switch {
335 // After the last chunk is read from underlying reader, we
336 // never re-fill s.buf.
337 case s.done:
338
339	// s.buf will be (re-)filled with the next chunk when it has fewer
340	// bytes than asked for.
341 case s.buf.Len() < len(buf):
342 s.chunkBufLen = 0
343 for {
344 n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
345 // Usually we validate `err` first, but in this case
346 // we are validating n > 0 for the following reasons.
347 //
348 // 1. n > 0, err is one of io.EOF, nil (near end of stream)
349 // A Reader returning a non-zero number of bytes at the end
350 // of the input stream may return either err == EOF or err == nil
351 //
352 // 2. n == 0, err is io.EOF (actual end of stream)
353 //
354 // Callers should always process the n > 0 bytes returned
355 // before considering the error err.
356 if n1 > 0 {
357 s.chunkBufLen += n1
358 s.bytesRead += int64(n1)
359
360 if s.chunkBufLen == payloadChunkSize ||
361 (s.chunkNum == s.totalChunks-1 &&
362 s.chunkBufLen == s.lastChunkSize) {
363 // Sign the chunk and write it to s.buf.
364 s.signChunk(s.chunkBufLen, true)
365 break
366 }
367 }
368 if err != nil {
369 if err == io.EOF {
370 // No more data left in baseReader - last chunk.
371 // Done reading the last chunk from baseReader.
372 s.done = true
373
374					// Bytes read from baseReader differ from the
375					// content length provided.
376 if s.bytesRead != s.contentLen {
377 return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
378 }
379
380 // Sign the chunk and write it to s.buf.
381 s.signChunk(0, len(s.trailer) == 0)
382 if len(s.trailer) > 0 {
383 // Trailer must be set now.
384 s.addSignedTrailer(s.trailer)
385 }
386 break
387 }
388 return 0, err
389 }
390
391 }
392 }
393 return s.buf.Read(buf)
394}
395
396// Close - this method makes underlying io.ReadCloser's Close method available.
397func (s *StreamingReader) Close() error {
398 if s.sh256 != nil {
399 s.sh256.Close()
400 s.sh256 = nil
401 }
402 return s.baseReadCloser.Close()
403}
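
A hedged usage sketch of StreamingSignV4: the endpoint and credentials are placeholders, and sha256Hasher is an assumed adapter from crypto/sha256 to the md5simd.Hasher interface (which only adds Close on top of hash.Hash), mirroring what minio-go constructs internally:

package example

import (
	"bytes"
	"crypto/sha256"
	"hash"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/signer"
)

// sha256Hasher adapts a standard hash.Hash to md5simd.Hasher.
type sha256Hasher struct{ hash.Hash }

func (sha256Hasher) Close() {}

func streamingPut(payload []byte) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPut,
		"https://s3.example.com/bucket/object", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// Wrap req.Body so that each Read yields signed aws-chunked frames.
	return signer.StreamingSignV4(req, "ACCESS", "SECRET", "", "us-east-1",
		int64(len(payload)), time.Now().UTC(), sha256Hasher{sha256.New()}), nil
}
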
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
new file mode 100644
index 0000000..fa4f8c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -0,0 +1,319 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "crypto/hmac"
23 "crypto/sha1"
24 "encoding/base64"
25 "fmt"
26 "net/http"
27 "net/url"
28 "sort"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/s3utils"
34)
35
36// Signature and API related constants.
37const (
38 signV2Algorithm = "AWS"
39)
40
41// Encode input URL path to URL encoded path.
42func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
43 if virtualHost {
44 reqHost := getHostAddr(req)
45 dotPos := strings.Index(reqHost, ".")
46 if dotPos > -1 {
47 bucketName := reqHost[:dotPos]
48 path = "/" + bucketName
49 path += req.URL.Path
50 path = s3utils.EncodePath(path)
51 return
52 }
53 }
54 path = s3utils.EncodePath(req.URL.Path)
55 return
56}
57
58// PreSignV2 - presign the request in the following style.
59// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
60func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
61 // Presign is not needed for anonymous credentials.
62 if accessKeyID == "" || secretAccessKey == "" {
63 return &req
64 }
65
66 d := time.Now().UTC()
67 // Find epoch expires when the request will expire.
68 epochExpires := d.Unix() + expires
69
70 // Add expires header if not present.
71 if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
72 req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
73 }
74
75 // Get presigned string to sign.
76 stringToSign := preStringToSignV2(req, virtualHost)
77 hm := hmac.New(sha1.New, []byte(secretAccessKey))
78 hm.Write([]byte(stringToSign))
79
80 // Calculate signature.
81 signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
82
83 query := req.URL.Query()
84 // Handle specially for Google Cloud Storage.
85 if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
86 query.Set("GoogleAccessId", accessKeyID)
87 } else {
88 query.Set("AWSAccessKeyId", accessKeyID)
89 }
90
91 // Fill in Expires for presigned query.
92 query.Set("Expires", strconv.FormatInt(epochExpires, 10))
93
94 // Encode query and save.
95 req.URL.RawQuery = s3utils.QueryEncode(query)
96
97 // Save signature finally.
98 req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
99
100 // Return.
101 return &req
102}
103
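A short usage sketch (bucket, object, and credentials are placeholders; relies on this file's existing imports):

func examplePreSignV2() string {
	req, _ := http.NewRequest(http.MethodGet,
		"https://mybucket.s3.amazonaws.com/photo.png", nil)
	// The returned URL ends in ?AWSAccessKeyId=...&Expires=...&Signature=...
	// and stays valid for one hour.
	return PreSignV2(*req, "ACCESS", "SECRET", 3600, true).URL.String()
}
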
104// PostPresignSignatureV2 - presigned signature for PostPolicy
105// request.
106func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
107 hm := hmac.New(sha1.New, []byte(secretAccessKey))
108 hm.Write([]byte(policyBase64))
109 signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
110 return signature
111}
112
113// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
114// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
115//
116// StringToSign = HTTP-Verb + "\n" +
117// Content-Md5 + "\n" +
118// Content-Type + "\n" +
119// Date + "\n" +
120// CanonicalizedProtocolHeaders +
121// CanonicalizedResource;
122//
123// CanonicalizedResource = [ "/" + Bucket ] +
124// <HTTP-Request-URI, from the protocol name up to the query string> +
125// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
126//
127// CanonicalizedProtocolHeaders = <described below>
128
129// SignV2 sign the request before Do() (AWS Signature Version 2).
130func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
131 // Signature calculation is not needed for anonymous credentials.
132 if accessKeyID == "" || secretAccessKey == "" {
133 return &req
134 }
135
136 // Initial time.
137 d := time.Now().UTC()
138
139 // Add date if not present.
140 if date := req.Header.Get("Date"); date == "" {
141 req.Header.Set("Date", d.Format(http.TimeFormat))
142 }
143
144 // Calculate HMAC for secretAccessKey.
145 stringToSign := stringToSignV2(req, virtualHost)
146 hm := hmac.New(sha1.New, []byte(secretAccessKey))
147 hm.Write([]byte(stringToSign))
148
149 // Prepare auth header.
150 authHeader := new(bytes.Buffer)
151 authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
152 encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
153 encoder.Write(hm.Sum(nil))
154 encoder.Close()
155
156 // Set Authorization header.
157 req.Header.Set("Authorization", authHeader.String())
158
159 return &req
160}
161
162// From the Amazon docs:
163//
164// StringToSign = HTTP-Verb + "\n" +
165//
166// Content-Md5 + "\n" +
167// Content-Type + "\n" +
168// Expires + "\n" +
169// CanonicalizedProtocolHeaders +
170// CanonicalizedResource;
171func preStringToSignV2(req http.Request, virtualHost bool) string {
172 buf := new(bytes.Buffer)
173 // Write standard headers.
174 writePreSignV2Headers(buf, req)
175 // Write canonicalized protocol headers if any.
176 writeCanonicalizedHeaders(buf, req)
177 // Write canonicalized Query resources if any.
178 writeCanonicalizedResource(buf, req, virtualHost)
179 return buf.String()
180}
181
182// writePreSignV2Headers - write preSign v2 required headers.
183func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
184 buf.WriteString(req.Method + "\n")
185 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
186 buf.WriteString(req.Header.Get("Content-Type") + "\n")
187 buf.WriteString(req.Header.Get("Expires") + "\n")
188}
189
190// From the Amazon docs:
191//
192// StringToSign = HTTP-Verb + "\n" +
193//
194// Content-Md5 + "\n" +
195// Content-Type + "\n" +
196// Date + "\n" +
197// CanonicalizedProtocolHeaders +
198// CanonicalizedResource;
199func stringToSignV2(req http.Request, virtualHost bool) string {
200 buf := new(bytes.Buffer)
201 // Write standard headers.
202 writeSignV2Headers(buf, req)
203 // Write canonicalized protocol headers if any.
204 writeCanonicalizedHeaders(buf, req)
205 // Write canonicalized Query resources if any.
206 writeCanonicalizedResource(buf, req, virtualHost)
207 return buf.String()
208}
209
210// writeSignV2Headers - write signV2 required headers.
211func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
212 buf.WriteString(req.Method + "\n")
213 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
214 buf.WriteString(req.Header.Get("Content-Type") + "\n")
215 buf.WriteString(req.Header.Get("Date") + "\n")
216}
217
218// writeCanonicalizedHeaders - write canonicalized headers.
219func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
220 var protoHeaders []string
221 vals := make(map[string][]string)
222 for k, vv := range req.Header {
223 // All the AMZ headers should be lowercase
224 lk := strings.ToLower(k)
225 if strings.HasPrefix(lk, "x-amz") {
226 protoHeaders = append(protoHeaders, lk)
227 vals[lk] = vv
228 }
229 }
230 sort.Strings(protoHeaders)
231 for _, k := range protoHeaders {
232 buf.WriteString(k)
233 buf.WriteByte(':')
234 for idx, v := range vals[k] {
235 if idx > 0 {
236 buf.WriteByte(',')
237 }
238 buf.WriteString(v)
239 }
240 buf.WriteByte('\n')
241 }
242}
243
244// AWS S3 Signature V2 calculation rule is given here:
245// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
246
247// Whitelist resource list that will be used in query string for signature-V2 calculation.
248//
249// This list should be kept alphabetically sorted, do not hastily edit.
250var resourceList = []string{
251 "acl",
252 "cors",
253 "delete",
254 "encryption",
255 "legal-hold",
256 "lifecycle",
257 "location",
258 "logging",
259 "notification",
260 "partNumber",
261 "policy",
262 "replication",
263 "requestPayment",
264 "response-cache-control",
265 "response-content-disposition",
266 "response-content-encoding",
267 "response-content-language",
268 "response-content-type",
269 "response-expires",
270 "retention",
271 "select",
272 "select-type",
273 "tagging",
274 "torrent",
275 "uploadId",
276 "uploads",
277 "versionId",
278 "versioning",
279 "versions",
280 "website",
281}
282
283// From the Amazon docs:
284//
285// CanonicalizedResource = [ "/" + Bucket ] +
286//
287// <HTTP-Request-URI, from the protocol name up to the query string> +
288// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
289func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
290 // Save request URL.
291 requestURL := req.URL
292 // Get encoded URL path.
293 buf.WriteString(encodeURL2Path(&req, virtualHost))
294 if requestURL.RawQuery != "" {
295 var n int
296 vals, _ := url.ParseQuery(requestURL.RawQuery)
297		// Verify if any subresource queries are present; if yes,
298		// canonicalize them.
299 for _, resource := range resourceList {
300 if vv, ok := vals[resource]; ok && len(vv) > 0 {
301 n++
302 // First element
303 switch n {
304 case 1:
305 buf.WriteByte('?')
306 // The rest
307 default:
308 buf.WriteByte('&')
309 }
310 buf.WriteString(resource)
311 // Request parameters
312 if len(vv[0]) > 0 {
313 buf.WriteByte('=')
314 buf.WriteString(vv[0])
315 }
316 }
317 }
318 }
319}
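
As a worked example (names are placeholders): a virtual-host-style GET on https://mybucket.s3.amazonaws.com/archive.zip?uploads&acl canonicalizes to

/mybucket/archive.zip?acl&uploads

since acl and uploads both appear in resourceList and are emitted in the list's alphabetical order, while any query parameter outside the whitelist is dropped from the canonical resource.
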
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 0000000..ffd2514
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,351 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "net/http"
24 "sort"
25 "strconv"
26 "strings"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
32// Signature and API related constants.
33const (
34 signV4Algorithm = "AWS4-HMAC-SHA256"
35 iso8601DateFormat = "20060102T150405Z"
36 yyyymmdd = "20060102"
37)
38
39// Different service types
40const (
41 ServiceTypeS3 = "s3"
42 ServiceTypeSTS = "sts"
43)
44
45// Excerpts from @lsegal -
46// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
47//
48// * User-Agent
49// This is ignored from signing because signing this causes problems with generating pre-signed
50// URLs (that are executed by other agents) or when customers pass requests through proxies, which
51// may modify the user-agent.
52//
53// * Authorization
54// Is skipped for obvious reasons.
55//
56// * Accept-Encoding
57// Some S3 servers like Hitachi Content Platform do not honor this header for signature
58// calculation.
59var v4IgnoredHeaders = map[string]bool{
60 "Accept-Encoding": true,
61 "Authorization": true,
62 "User-Agent": true,
63}
64
65// getSigningKey derives the HMAC key used to calculate the final signature.
66func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
67 date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
68 location := sumHMAC(date, []byte(loc))
69 service := sumHMAC(location, []byte(serviceType))
70 signingKey := sumHMAC(service, []byte("aws4_request"))
71 return signingKey
72}
73
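Written out for a concrete date and region (placeholders), the derivation chain is:

DateKey    = HMAC-SHA256("AWS4" + secret, "20231229")
RegionKey  = HMAC-SHA256(DateKey, "us-east-1")
ServiceKey = HMAC-SHA256(RegionKey, "s3")
SigningKey = HMAC-SHA256(ServiceKey, "aws4_request")
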
74// getSignature returns the final signature in hexadecimal form.
75func getSignature(signingKey []byte, stringToSign string) string {
76 return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
77}
78
79// getScope generates a scope string from a specific date, an AWS region,
80// and a service.
81func getScope(location string, t time.Time, serviceType string) string {
82 scope := strings.Join([]string{
83 t.Format(yyyymmdd),
84 location,
85 serviceType,
86 "aws4_request",
87 }, "/")
88 return scope
89}
90
91// GetCredential generates a credential string.
92func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
93 scope := getScope(location, t, serviceType)
94 return accessKeyID + "/" + scope
95}
96
97// getHashedPayload gets the hexadecimal value of the SHA256 hash of
98// the request payload.
99func getHashedPayload(req http.Request) string {
100 hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
101 if hashedPayload == "" {
102 // Presign does not have a payload, use S3 recommended value.
103 hashedPayload = unsignedPayload
104 }
105 return hashedPayload
106}
107
108// getCanonicalHeaders generates a list of request headers for
109// signature.
110func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
111 var headers []string
112 vals := make(map[string][]string)
113 for k, vv := range req.Header {
114 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
115 continue // ignored header
116 }
117 headers = append(headers, strings.ToLower(k))
118 vals[strings.ToLower(k)] = vv
119 }
120 if !headerExists("host", headers) {
121 headers = append(headers, "host")
122 }
123 sort.Strings(headers)
124
125 var buf bytes.Buffer
126 // Save all the headers in canonical form <header>:<value> newline
127 // separated for each header.
128 for _, k := range headers {
129 buf.WriteString(k)
130 buf.WriteByte(':')
131 switch {
132 case k == "host":
133 buf.WriteString(getHostAddr(&req))
134 buf.WriteByte('\n')
135 default:
136 for idx, v := range vals[k] {
137 if idx > 0 {
138 buf.WriteByte(',')
139 }
140 buf.WriteString(signV4TrimAll(v))
141 }
142 buf.WriteByte('\n')
143 }
144 }
145 return buf.String()
146}
147
148func headerExists(key string, headers []string) bool {
149 for _, k := range headers {
150 if k == key {
151 return true
152 }
153 }
154 return false
155}
156
157// getSignedHeaders generates all signed request headers,
158// i.e. a lexically sorted, semicolon-separated list of lowercase
159// request header names.
160func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
161 var headers []string
162 for k := range req.Header {
163 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
164 continue // Ignored header found continue.
165 }
166 headers = append(headers, strings.ToLower(k))
167 }
168 if !headerExists("host", headers) {
169 headers = append(headers, "host")
170 }
171 sort.Strings(headers)
172 return strings.Join(headers, ";")
173}
174
175// getCanonicalRequest generates a canonical request of the following style.
176//
177// canonicalRequest =
178//
179// <HTTPMethod>\n
180// <CanonicalURI>\n
181// <CanonicalQueryString>\n
182// <CanonicalHeaders>\n
183// <SignedHeaders>\n
184// <HashedPayload>
185func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
186 req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
187 canonicalRequest := strings.Join([]string{
188 req.Method,
189 s3utils.EncodePath(req.URL.Path),
190 req.URL.RawQuery,
191 getCanonicalHeaders(req, ignoredHeaders),
192 getSignedHeaders(req, ignoredHeaders),
193 hashedPayload,
194 }, "\n")
195 return canonicalRequest
196}
197
198// getStringToSignV4 returns the string to sign based on selected query values.
199func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
200 stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
201 stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
202 stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
203 return stringToSign
204}
205
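The resulting string to sign has this shape (concrete values are placeholders):

AWS4-HMAC-SHA256
20231229T120000Z
20231229/us-east-1/s3/aws4_request
<hex-encoded SHA256 of the canonical request>
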
206// PreSignV4 presigns the request, in accordance with
207// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
208func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
209 // Presign is not needed for anonymous credentials.
210 if accessKeyID == "" || secretAccessKey == "" {
211 return &req
212 }
213
214 // Initial time.
215 t := time.Now().UTC()
216
217 // Get credential string.
218 credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
219
220 // Get all signed headers.
221 signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
222
223 // Set URL query.
224 query := req.URL.Query()
225 query.Set("X-Amz-Algorithm", signV4Algorithm)
226 query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
227 query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
228 query.Set("X-Amz-SignedHeaders", signedHeaders)
229 query.Set("X-Amz-Credential", credential)
230 // Set session token if available.
231 if sessionToken != "" {
232 query.Set("X-Amz-Security-Token", sessionToken)
233 }
234 req.URL.RawQuery = query.Encode()
235
236 // Get canonical request.
237 canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
238
239 // Get string to sign from canonical request.
240 stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
241
242	// Get hmac signing key.
243 signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
244
245 // Calculate signature.
246 signature := getSignature(signingKey, stringToSign)
247
248 // Add signature header to RawQuery.
249 req.URL.RawQuery += "&X-Amz-Signature=" + signature
250
251 return &req
252}
253
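A short usage sketch (bucket, object, and credentials are placeholders; relies on this file's existing imports):

func examplePreSignV4() string {
	req, _ := http.NewRequest(http.MethodGet,
		"https://s3.amazonaws.com/mybucket/archive.zip", nil)
	// Presigned URL valid for 15 minutes, ending in &X-Amz-Signature=...
	return PreSignV4(*req, "ACCESS", "SECRET", "", "us-east-1", 900).URL.String()
}
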
254// PostPresignSignatureV4 - presigned signature for PostPolicy
255// requests.
256func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
257	// Get signing key.
258	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
259	// Calculate signature.
260	signature := getSignature(signingKey, policyBase64)
261 return signature
262}
263
264// SignV4STS - signature v4 for STS request.
265func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
266 return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
267}
268
269// Internal function called for different service types.
270func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
271 // Signature calculation is not needed for anonymous credentials.
272 if accessKeyID == "" || secretAccessKey == "" {
273 return &req
274 }
275
276 // Initial time.
277 t := time.Now().UTC()
278
279 // Set x-amz-date.
280 req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
281
282 // Set session token if available.
283 if sessionToken != "" {
284 req.Header.Set("X-Amz-Security-Token", sessionToken)
285 }
286
287 if len(trailer) > 0 {
288 for k := range trailer {
289 req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
290 }
291
292 req.Header.Set("Content-Encoding", "aws-chunked")
293 req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
294 }
295
296 hashedPayload := getHashedPayload(req)
297 if serviceType == ServiceTypeSTS {
298 // Content sha256 header is not sent with the request
299 // but it is expected to have sha256 of payload for signature
300 // in STS service type request.
301 req.Header.Del("X-Amz-Content-Sha256")
302 }
303
304 // Get canonical request.
305 canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
306
307 // Get string to sign from canonical request.
308 stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
309
310 // Get hmac signing key.
311 signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
312
313 // Get credential string.
314 credential := GetCredential(accessKeyID, location, t, serviceType)
315
316 // Get all signed headers.
317 signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
318
319 // Calculate signature.
320 signature := getSignature(signingKey, stringToSign)
321
322 // If regular request, construct the final authorization header.
323 parts := []string{
324 signV4Algorithm + " Credential=" + credential,
325 "SignedHeaders=" + signedHeaders,
326 "Signature=" + signature,
327 }
328
329 // Set authorization header.
330 auth := strings.Join(parts, ", ")
331 req.Header.Set("Authorization", auth)
332
333 if len(trailer) > 0 {
334 // Use custom chunked encoding.
335 req.Trailer = trailer
336 return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
337 }
338 return &req
339}
340
341// SignV4 sign the request before Do(), in accordance with
342// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
343func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
344 return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
345}
346
347// SignV4Trailer sign the request before Do(), in accordance with
348// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
349func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
350 return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
351}
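
A hedged sketch of signing a request over its actual payload hash rather than UNSIGNED-PAYLOAD (credentials are placeholders; sum256 is the package helper defined in utils.go):

func exampleSignV4(payload []byte) *http.Request {
	req, _ := http.NewRequest(http.MethodPut,
		"https://s3.amazonaws.com/mybucket/object", bytes.NewReader(payload))
	// Bind the signature to the payload contents.
	req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(payload)))
	// The returned request carries the V4 credential in its Authorization header.
	return SignV4(*req, "ACCESS", "SECRET", "", "us-east-1")
}
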
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 0000000..87c9939
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "crypto/hmac"
22 "crypto/sha256"
23 "net/http"
24 "strings"
25)
26
27// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the payload is not signed, e.g. for presigned requests.
28const unsignedPayload = "UNSIGNED-PAYLOAD"
29
30// sum256 calculates the SHA256 sum of an input byte array.
31func sum256(data []byte) []byte {
32 hash := sha256.New()
33 hash.Write(data)
34 return hash.Sum(nil)
35}
36
37// sumHMAC calculates the HMAC-SHA256 of data using the given key.
38func sumHMAC(key, data []byte) []byte {
39 hash := hmac.New(sha256.New, key)
40 hash.Write(data)
41 return hash.Sum(nil)
42}
43
44// getHostAddr returns host header if available, otherwise returns host from URL
45func getHostAddr(req *http.Request) string {
46 host := req.Header.Get("host")
47 if host != "" && req.Host != host {
48 return host
49 }
50 if req.Host != "" {
51 return req.Host
52 }
53 return req.URL.Host
54}
55
56// Trim leading and trailing spaces and replace sequential spaces with one space, following TrimAll()
57// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
58func signV4TrimAll(input string) string {
59 // Compress adjacent spaces (a space is determined by
60 // unicode.IsSpace() internally here) to one space and return
61 return strings.Join(strings.Fields(input), " ")
62}
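
For instance, signV4TrimAll("  a   b  ") returns "a b": strings.Fields splits on any run of Unicode whitespace and the join restores single spaces.
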
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 0000000..b5fb956
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package sse
19
20import "encoding/xml"
21
22// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
23// KMS, SSEAlgorithm needs to be set to "aws:kms".
24// MinIO currently does not support KMS.
25type ApplySSEByDefault struct {
26 KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
27 SSEAlgorithm string `xml:"SSEAlgorithm"`
28}
29
30// Rule layer encapsulates default encryption configuration
31type Rule struct {
32 Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
33}
34
35// Configuration is the default encryption configuration structure
36type Configuration struct {
37 XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
38 Rules []Rule `xml:"Rule"`
39}
40
41// NewConfigurationSSES3 initializes a new SSE-S3 configuration
42func NewConfigurationSSES3() *Configuration {
43 return &Configuration{
44 Rules: []Rule{
45 {
46 Apply: ApplySSEByDefault{
47 SSEAlgorithm: "AES256",
48 },
49 },
50 },
51 }
52}
53
54// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
55func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
56 return &Configuration{
57 Rules: []Rule{
58 {
59 Apply: ApplySSEByDefault{
60 KmsMasterKeyID: kmsMasterKey,
61 SSEAlgorithm: "aws:kms",
62 },
63 },
64 },
65 }
66}
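
A hedged usage sketch (the KMS key ARN is a placeholder; relies on this file's encoding/xml import):

func exampleSSEKMSConfig() (string, error) {
	cfg := NewConfigurationSSEKMS("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE")
	out, err := xml.Marshal(cfg)
	// out is the XML body for a bucket-encryption call:
	// <ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault>
	//   <KMSMasterKeyID>arn:...</KMSMasterKeyID><SSEAlgorithm>aws:kms</SSEAlgorithm>
	// </ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>
	return string(out), err
}
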
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 0000000..7a84a6f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package tags
19
20import (
21 "encoding/xml"
22 "io"
23 "net/url"
24 "regexp"
25 "sort"
26 "strings"
27 "unicode/utf8"
28)
29
30// Error contains tag specific error.
31type Error interface {
32 error
33 Code() string
34}
35
36type errTag struct {
37 code string
38 message string
39}
40
41// Code contains error code.
42func (err errTag) Code() string {
43 return err.code
44}
45
46// Error contains error message.
47func (err errTag) Error() string {
48 return err.message
49}
50
51var (
52 errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
53 errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"}
54 errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
55 errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
56 errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
57)
58
59// Tag comes with limitation as per
60// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
61// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
62const (
63 maxKeyLength = 128
64 maxValueLength = 256
65 maxObjectTagCount = 10
66 maxTagCount = 50
67)
68
69// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
70// Borrowed from the article above; testing various ASCII characters shows the
71// following regex is accepted by AWS S3 for both tag keys and values.
72var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
73
74func checkKey(key string) error {
75 if len(key) == 0 {
76 return errInvalidTagKey
77 }
78
79 if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
80 return errInvalidTagKey
81 }
82
83 return nil
84}
85
86func checkValue(value string) error {
87 if value != "" {
88 if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
89 return errInvalidTagValue
90 }
91 }
92
93 return nil
94}
95
96// Tag denotes key and value.
97type Tag struct {
98 Key string `xml:"Key"`
99 Value string `xml:"Value"`
100}
101
102func (tag Tag) String() string {
103 return tag.Key + "=" + tag.Value
104}
105
106// IsEmpty returns whether this tag is empty or not.
107func (tag Tag) IsEmpty() bool {
108 return tag.Key == ""
109}
110
111// Validate checks this tag.
112func (tag Tag) Validate() error {
113 if err := checkKey(tag.Key); err != nil {
114 return err
115 }
116
117 return checkValue(tag.Value)
118}
119
120// MarshalXML encodes to XML data.
121func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
122 if err := tag.Validate(); err != nil {
123 return err
124 }
125
126 type subTag Tag // to avoid recursively calling MarshalXML()
127 return e.EncodeElement(subTag(tag), start)
128}
129
130// UnmarshalXML decodes XML data to tag.
131func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
132 type subTag Tag // to avoid recursively calling UnmarshalXML()
133 var st subTag
134 if err := d.DecodeElement(&st, &start); err != nil {
135 return err
136 }
137
138 if err := Tag(st).Validate(); err != nil {
139 return err
140 }
141
142 *tag = Tag(st)
143 return nil
144}
145
146// tagSet represents a list of unique tags.
147type tagSet struct {
148 tagMap map[string]string
149 isObject bool
150}
151
152func (tags tagSet) String() string {
153 if len(tags.tagMap) == 0 {
154 return ""
155 }
156 var buf strings.Builder
157 keys := make([]string, 0, len(tags.tagMap))
158 for k := range tags.tagMap {
159 keys = append(keys, k)
160 }
161 sort.Strings(keys)
162 for _, k := range keys {
163 keyEscaped := url.QueryEscape(k)
164 valueEscaped := url.QueryEscape(tags.tagMap[k])
165 if buf.Len() > 0 {
166 buf.WriteByte('&')
167 }
168 buf.WriteString(keyEscaped)
169 buf.WriteByte('=')
170 buf.WriteString(valueEscaped)
171 }
172 return buf.String()
173}
174
175func (tags *tagSet) remove(key string) {
176 delete(tags.tagMap, key)
177}
178
179func (tags *tagSet) set(key, value string, failOnExist bool) error {
180 if failOnExist {
181 if _, found := tags.tagMap[key]; found {
182 return errDuplicateTagKey
183 }
184 }
185
186 if err := checkKey(key); err != nil {
187 return err
188 }
189
190 if err := checkValue(value); err != nil {
191 return err
192 }
193
194 if tags.isObject {
195 if len(tags.tagMap) == maxObjectTagCount {
196 return errTooManyObjectTags
197 }
198 } else if len(tags.tagMap) == maxTagCount {
199 return errTooManyTags
200 }
201
202 tags.tagMap[key] = value
203 return nil
204}
205
206func (tags tagSet) count() int {
207 return len(tags.tagMap)
208}
209
210func (tags tagSet) toMap() map[string]string {
211 m := make(map[string]string, len(tags.tagMap))
212 for key, value := range tags.tagMap {
213 m[key] = value
214 }
215 return m
216}
217
218// MarshalXML encodes to XML data.
219func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
220 tagList := struct {
221 Tags []Tag `xml:"Tag"`
222 }{}
223
224 tagList.Tags = make([]Tag, 0, len(tags.tagMap))
225 for key, value := range tags.tagMap {
226 tagList.Tags = append(tagList.Tags, Tag{key, value})
227 }
228
229 return e.EncodeElement(tagList, start)
230}
231
232// UnmarshalXML decodes XML data to tag list.
233func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
234 tagList := struct {
235 Tags []Tag `xml:"Tag"`
236 }{}
237
238 if err := d.DecodeElement(&tagList, &start); err != nil {
239 return err
240 }
241
242 if tags.isObject {
243 if len(tagList.Tags) > maxObjectTagCount {
244 return errTooManyObjectTags
245 }
246 } else if len(tagList.Tags) > maxTagCount {
247 return errTooManyTags
248 }
249
250 m := make(map[string]string, len(tagList.Tags))
251 for _, tag := range tagList.Tags {
252 if _, found := m[tag.Key]; found {
253 return errDuplicateTagKey
254 }
255
256 m[tag.Key] = tag.Value
257 }
258
259 tags.tagMap = m
260 return nil
261}
262
263type tagging struct {
264 XMLName xml.Name `xml:"Tagging"`
265 TagSet *tagSet `xml:"TagSet"`
266}
267
268// Tags is a list of tags of XML request/response as per
269// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
270type Tags tagging
271
272func (tags Tags) String() string {
273 return tags.TagSet.String()
274}
275
276// Remove removes a tag by its key.
277func (tags *Tags) Remove(key string) {
278 tags.TagSet.remove(key)
279}
280
281// Set sets new tag.
282func (tags *Tags) Set(key, value string) error {
283 return tags.TagSet.set(key, value, false)
284}
285
286// Count - returns the number of tags accounted for.
287func (tags Tags) Count() int {
288 return tags.TagSet.count()
289}
290
291// ToMap returns copy of tags.
292func (tags Tags) ToMap() map[string]string {
293 return tags.TagSet.toMap()
294}
295
296// MapToObjectTags converts an input map of key and value into
297// *Tags data structure with validation.
298func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
299 return NewTags(tagMap, true)
300}
301
302// MapToBucketTags converts an input map of key and value into
303// *Tags data structure with validation.
304func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
305 return NewTags(tagMap, false)
306}
307
308// NewTags creates Tags from tagMap. If isObject is set, it validates them as object tags.
309func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
310 tagging := &Tags{
311 TagSet: &tagSet{
312 tagMap: make(map[string]string),
313 isObject: isObject,
314 },
315 }
316
317 for key, value := range tagMap {
318 if err := tagging.TagSet.set(key, value, true); err != nil {
319 return nil, err
320 }
321 }
322
323 return tagging, nil
324}
325
326func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
327 tagging := &Tags{
328 TagSet: &tagSet{
329 tagMap: make(map[string]string),
330 isObject: isObject,
331 },
332 }
333
334 if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
335 return nil, err
336 }
337
338 return tagging, nil
339}
340
341// ParseBucketXML decodes XML data of tags in reader specified in
342// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
343func ParseBucketXML(reader io.Reader) (*Tags, error) {
344 return unmarshalXML(reader, false)
345}
346
347// ParseObjectXML decodes XML data of tags in reader specified in
348// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
349func ParseObjectXML(reader io.Reader) (*Tags, error) {
350 return unmarshalXML(reader, true)
351}
352
353// stringsCut slices s around the first instance of sep,
354// returning the text before and after sep.
355// The found result reports whether sep appears in s.
356// If sep does not appear in s, cut returns s, "", false.
357func stringsCut(s, sep string) (before, after string, found bool) {
358 if i := strings.Index(s, sep); i >= 0 {
359 return s[:i], s[i+len(sep):], true
360 }
361 return s, "", false
362}
363
364func (tags *tagSet) parseTags(tgs string) (err error) {
365 for tgs != "" {
366 var key string
367 key, tgs, _ = stringsCut(tgs, "&")
368 if key == "" {
369 continue
370 }
371 key, value, _ := stringsCut(key, "=")
372 key, err1 := url.QueryUnescape(key)
373 if err1 != nil {
374 if err == nil {
375 err = err1
376 }
377 continue
378 }
379 value, err1 = url.QueryUnescape(value)
380 if err1 != nil {
381 if err == nil {
382 err = err1
383 }
384 continue
385 }
386 if err = tags.set(key, value, true); err != nil {
387 return err
388 }
389 }
390 return err
391}
392
393// Parse decodes HTTP query formatted string into tags which is limited by isObject.
394// A query formatted string is like "key1=value1&key2=value2".
395func Parse(s string, isObject bool) (*Tags, error) {
396 tagging := &Tags{
397 TagSet: &tagSet{
398 tagMap: make(map[string]string),
399 isObject: isObject,
400 },
401 }
402
403 if err := tagging.TagSet.parseTags(s); err != nil {
404 return nil, err
405 }
406
407 return tagging, nil
408}
409
410// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
411func ParseObjectTags(s string) (*Tags, error) {
412 return Parse(s, true)
413}
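
A short usage sketch of the package (tag keys and values are placeholders):

func exampleTags() (string, error) {
	t, err := NewTags(map[string]string{"project": "alpha", "env": "dev"}, true)
	if err != nil {
		return "", err
	}
	// Keys come out sorted and URL-encoded: "env=dev&project=alpha".
	return t.String(), nil
}
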
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
new file mode 100644
index 0000000..3f4881e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -0,0 +1,349 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/base64"
22 "fmt"
23 "net/http"
24 "strings"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/encrypt"
28)
29
30// expirationDateFormat date format for expiration key in json policy.
31const expirationDateFormat = "2006-01-02T15:04:05.000Z"
32
33// policyCondition explanation:
34// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
35//
36// Example:
37//
38// policyCondition {
39// matchType: "$eq",
40// key: "$Content-Type",
41// value: "image/png",
42// }
43type policyCondition struct {
44 matchType string
45 condition string
46 value string
47}
48
49// PostPolicy - Provides strict static type conversion and validation
50// for Amazon S3's POST policy JSON string.
51type PostPolicy struct {
52 // Expiration date and time of the POST policy.
53 expiration time.Time
54 // Collection of different policy conditions.
55 conditions []policyCondition
56 // ContentLengthRange minimum and maximum allowable size for the
57 // uploaded content.
58 contentLengthRange struct {
59 min int64
60 max int64
61 }
62
63 // Post form data.
64 formData map[string]string
65}
66
67// NewPostPolicy - Instantiate new post policy.
68func NewPostPolicy() *PostPolicy {
69 p := &PostPolicy{}
70 p.conditions = make([]policyCondition, 0)
71 p.formData = make(map[string]string)
72 return p
73}
74
75// SetExpires - Sets expiration time for the new policy.
76func (p *PostPolicy) SetExpires(t time.Time) error {
77 if t.IsZero() {
78 return errInvalidArgument("No expiry time set.")
79 }
80 p.expiration = t
81 return nil
82}
83
84// SetKey - Sets an object name for the policy based upload.
85func (p *PostPolicy) SetKey(key string) error {
86 if strings.TrimSpace(key) == "" || key == "" {
87 return errInvalidArgument("Object name is empty.")
88 }
89 policyCond := policyCondition{
90 matchType: "eq",
91 condition: "$key",
92 value: key,
93 }
94 if err := p.addNewPolicy(policyCond); err != nil {
95 return err
96 }
97 p.formData["key"] = key
98 return nil
99}
100
101// SetKeyStartsWith - Sets an object name that a policy based upload
102// can start with.
103// Can use an empty value ("") to allow any key.
104func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
105 policyCond := policyCondition{
106 matchType: "starts-with",
107 condition: "$key",
108 value: keyStartsWith,
109 }
110 if err := p.addNewPolicy(policyCond); err != nil {
111 return err
112 }
113 p.formData["key"] = keyStartsWith
114 return nil
115}
116
117// SetBucket - Sets bucket at which objects will be uploaded to.
118func (p *PostPolicy) SetBucket(bucketName string) error {
119 if strings.TrimSpace(bucketName) == "" || bucketName == "" {
120 return errInvalidArgument("Bucket name is empty.")
121 }
122 policyCond := policyCondition{
123 matchType: "eq",
124 condition: "$bucket",
125 value: bucketName,
126 }
127 if err := p.addNewPolicy(policyCond); err != nil {
128 return err
129 }
130 p.formData["bucket"] = bucketName
131 return nil
132}
133
134// SetCondition - Sets condition for credentials, date and algorithm
135func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
136 if strings.TrimSpace(value) == "" || value == "" {
137 return errInvalidArgument("No value specified for condition")
138 }
139
140 policyCond := policyCondition{
141 matchType: matchType,
142 condition: "$" + condition,
143 value: value,
144 }
145 if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
146 if err := p.addNewPolicy(policyCond); err != nil {
147 return err
148 }
149 p.formData[condition] = value
150 return nil
151 }
152 return errInvalidArgument("Invalid condition in policy")
153}
154
155// SetContentType - Sets content-type of the object for this policy
156// based upload.
157func (p *PostPolicy) SetContentType(contentType string) error {
158 if strings.TrimSpace(contentType) == "" || contentType == "" {
159 return errInvalidArgument("No content type specified.")
160 }
161 policyCond := policyCondition{
162 matchType: "eq",
163 condition: "$Content-Type",
164 value: contentType,
165 }
166 if err := p.addNewPolicy(policyCond); err != nil {
167 return err
168 }
169 p.formData["Content-Type"] = contentType
170 return nil
171}
172
173// SetContentTypeStartsWith - Sets what content-type of the object for this policy
174// based upload can start with.
175// Can use an empty value ("") to allow any content-type.
176func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
177 policyCond := policyCondition{
178 matchType: "starts-with",
179 condition: "$Content-Type",
180 value: contentTypeStartsWith,
181 }
182 if err := p.addNewPolicy(policyCond); err != nil {
183 return err
184 }
185 p.formData["Content-Type"] = contentTypeStartsWith
186 return nil
187}
188
189// SetContentLengthRange - Set new min and max content length
190// condition for all incoming uploads.
191func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
192 if min > max {
193 return errInvalidArgument("Minimum limit is larger than maximum limit.")
194 }
195 if min < 0 {
196 return errInvalidArgument("Minimum limit cannot be negative.")
197 }
198 if max <= 0 {
199 return errInvalidArgument("Maximum limit cannot be non-positive.")
200 }
201 p.contentLengthRange.min = min
202 p.contentLengthRange.max = max
203 return nil
204}
205
206// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
207// based upload.
208func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
209 if strings.TrimSpace(redirect) == "" || redirect == "" {
210 return errInvalidArgument("Redirect is empty")
211 }
212 policyCond := policyCondition{
213 matchType: "eq",
214 condition: "$success_action_redirect",
215 value: redirect,
216 }
217 if err := p.addNewPolicy(policyCond); err != nil {
218 return err
219 }
220 p.formData["success_action_redirect"] = redirect
221 return nil
222}
223
224// SetSuccessStatusAction - Sets the status success code of the object for this policy
225// based upload.
226func (p *PostPolicy) SetSuccessStatusAction(status string) error {
227 if strings.TrimSpace(status) == "" || status == "" {
228 return errInvalidArgument("Status is empty")
229 }
230 policyCond := policyCondition{
231 matchType: "eq",
232 condition: "$success_action_status",
233 value: status,
234 }
235 if err := p.addNewPolicy(policyCond); err != nil {
236 return err
237 }
238 p.formData["success_action_status"] = status
239 return nil
240}
241
242// SetUserMetadata - Set user metadata as a key/value pair.
243// Can be retrieved through a HEAD request or an event.
244func (p *PostPolicy) SetUserMetadata(key, value string) error {
245 if strings.TrimSpace(key) == "" || key == "" {
246 return errInvalidArgument("Key is empty")
247 }
248 if strings.TrimSpace(value) == "" || value == "" {
249 return errInvalidArgument("Value is empty")
250 }
251 headerName := fmt.Sprintf("x-amz-meta-%s", key)
252 policyCond := policyCondition{
253 matchType: "eq",
254 condition: fmt.Sprintf("$%s", headerName),
255 value: value,
256 }
257 if err := p.addNewPolicy(policyCond); err != nil {
258 return err
259 }
260 p.formData[headerName] = value
261 return nil
262}
263
264// SetChecksum sets the checksum of the request.
265func (p *PostPolicy) SetChecksum(c Checksum) {
266 if c.IsSet() {
267 p.formData[amzChecksumAlgo] = c.Type.String()
268 p.formData[c.Type.Key()] = c.Encoded()
269 }
270}
271
272// SetEncryption - sets encryption headers for POST API
273func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
274 if sse == nil {
275 return
276 }
277 h := http.Header{}
278 sse.Marshal(h)
279 for k, v := range h {
280 p.formData[k] = v[0]
281 }
282}
283
284// SetUserData - Set user data as a key/value pair.
285// Can be retrieved through a HEAD request or an event.
286func (p *PostPolicy) SetUserData(key, value string) error {
287 if key == "" {
288 return errInvalidArgument("Key is empty")
289 }
290 if value == "" {
291 return errInvalidArgument("Value is empty")
292 }
293 headerName := fmt.Sprintf("x-amz-%s", key)
294 policyCond := policyCondition{
295 matchType: "eq",
296 condition: fmt.Sprintf("$%s", headerName),
297 value: value,
298 }
299 if err := p.addNewPolicy(policyCond); err != nil {
300 return err
301 }
302 p.formData[headerName] = value
303 return nil
304}
305
306// addNewPolicy - internal helper to validate adding new policies.
307// Can use starts-with with an empty value ("") to allow any content within a form field.
308func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
309 if policyCond.matchType == "" || policyCond.condition == "" {
310 return errInvalidArgument("Policy fields are empty.")
311 }
312 if policyCond.matchType != "starts-with" && policyCond.value == "" {
313 return errInvalidArgument("Policy value is empty.")
314 }
315 p.conditions = append(p.conditions, policyCond)
316 return nil
317}
318
319// String function for printing policy in json formatted string.
320func (p PostPolicy) String() string {
321 return string(p.marshalJSON())
322}
323
324// marshalJSON - Provides Marshaled JSON in bytes.
325func (p PostPolicy) marshalJSON() []byte {
326 expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
327 var conditionsStr string
328 conditions := []string{}
329 for _, po := range p.conditions {
330 conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
331 }
332 if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
333 conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
334 p.contentLengthRange.min, p.contentLengthRange.max))
335 }
336 if len(conditions) > 0 {
337 conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
338 }
339 retStr := "{"
340 retStr = retStr + expirationStr + ","
341 retStr += conditionsStr
342 retStr += "}"
343 return []byte(retStr)
344}
345
346// base64 - Produces base64 of PostPolicy's Marshaled json.
347func (p PostPolicy) base64() string {
348 return base64.StdEncoding.EncodeToString(p.marshalJSON())
349}
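
A hedged sketch of assembling a policy (bucket and key are placeholders; error returns are deliberately ignored for brevity); in practice the result is handed to the client's PresignedPostPolicy call, which signs it and returns the form data for the POST:

func examplePostPolicy() string {
	policy := NewPostPolicy()
	_ = policy.SetBucket("mybucket")
	_ = policy.SetKey("uploads/photo.png")
	_ = policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))
	_ = policy.SetContentLengthRange(1, 10<<20) // 1 B .. 10 MiB
	// JSON policy document, e.g.
	// {"expiration":"...","conditions":[["eq","$bucket","mybucket"],...]}
	return policy.String()
}
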
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
new file mode 100644
index 0000000..bfeea95
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -0,0 +1,69 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import "time"
21
22// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
23func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
24 attemptCh := make(chan int)
25
26 // normalize jitter to the range [0, 1.0]
27 if jitter < NoJitter {
28 jitter = NoJitter
29 }
30 if jitter > MaxJitter {
31 jitter = MaxJitter
32 }
33
34 // computes the exponential backoff duration according to
35 // https://www.awsarchitectureblog.com/2015/03/backoff.html
36 exponentialBackoffWait := func(attempt int) time.Duration {
37 // 1<<uint(attempt) below could overflow, so limit the value of attempt
38 maxAttempt := 30
39 if attempt > maxAttempt {
40 attempt = maxAttempt
41 }
42 // sleep = random_between(0, min(cap, base * 2 ** attempt))
43 sleep := unit * time.Duration(1<<uint(attempt))
44 if sleep > cap {
45 sleep = cap
46 }
47 if jitter != NoJitter {
48 sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
49 }
50 return sleep
51 }
52
53 go func() {
54 defer close(attemptCh)
55 var nextBackoff int
56 for {
57 select {
58			// Attempt starts.
59 case attemptCh <- nextBackoff:
60 nextBackoff++
61 case <-doneCh:
62 // Stop the routine.
63 return
64 }
65 time.Sleep(exponentialBackoffWait(nextBackoff))
66 }
67 }()
68 return attemptCh
69}
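`newRetryTimerContinous` is unexported, so here is a self-contained sketch of the same capped, jittered exponential backoff computation; the unit, cap, and jitter values below are illustrative:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors exponentialBackoffWait above:
// sleep = random_between(0, min(cap, unit * 2^attempt)).
func backoff(attempt int, unit, cap time.Duration, jitter float64, rng *rand.Rand) time.Duration {
	if attempt > 30 { // keep 1<<attempt from overflowing
		attempt = 30
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	// Subtract a random fraction of the sleep, scaled by jitter.
	sleep -= time.Duration(rng.Float64() * float64(sleep) * jitter)
	return sleep
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(attempt, backoff(attempt, 200*time.Millisecond, time.Second, 0.5, rng))
	}
}
```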
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 0000000..1c6105e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,148 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "crypto/x509"
23 "errors"
24 "net/http"
25 "net/url"
26 "time"
27)
28
29// MaxRetry is the maximum number of retries before stopping.
30var MaxRetry = 10
31
32// MaxJitter will randomize over the full exponential backoff time
33const MaxJitter = 1.0
34
35// NoJitter disables the use of jitter for randomizing the exponential backoff time
36const NoJitter = 0.0
37
38// DefaultRetryUnit - default unit multiplicative per retry.
39// defaults to 200 * time.Millisecond
40var DefaultRetryUnit = 200 * time.Millisecond
41
42// DefaultRetryCap - Each retry attempt never waits longer than
43// this maximum time duration.
44var DefaultRetryCap = time.Second
45
46// newRetryTimer creates a timer with exponentially increasing
47// delays until the maximum retry attempts are reached.
48func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
49 attemptCh := make(chan int)
50
51 // computes the exponential backoff duration according to
52 // https://www.awsarchitectureblog.com/2015/03/backoff.html
53 exponentialBackoffWait := func(attempt int) time.Duration {
54 // normalize jitter to the range [0, 1.0]
55 if jitter < NoJitter {
56 jitter = NoJitter
57 }
58 if jitter > MaxJitter {
59 jitter = MaxJitter
60 }
61
62 // sleep = random_between(0, min(cap, base * 2 ** attempt))
63 sleep := unit * time.Duration(1<<uint(attempt))
64 if sleep > cap {
65 sleep = cap
66 }
67 if jitter != NoJitter {
68 sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
69 }
70 return sleep
71 }
72
73 go func() {
74 defer close(attemptCh)
75 for i := 0; i < maxRetry; i++ {
76 select {
77 case attemptCh <- i + 1:
78 case <-ctx.Done():
79 return
80 }
81
82 select {
83 case <-time.After(exponentialBackoffWait(i)):
84 case <-ctx.Done():
85 return
86 }
87 }
88 }()
89 return attemptCh
90}
91
92// List of AWS S3 error codes which are retryable.
93var retryableS3Codes = map[string]struct{}{
94 "RequestError": {},
95 "RequestTimeout": {},
96 "Throttling": {},
97 "ThrottlingException": {},
98 "RequestLimitExceeded": {},
99 "RequestThrottled": {},
100 "InternalError": {},
101 "ExpiredToken": {},
102 "ExpiredTokenException": {},
103 "SlowDown": {},
104 // Add more AWS S3 codes here.
105}
106
107// isS3CodeRetryable - is s3 error code retryable.
108func isS3CodeRetryable(s3Code string) (ok bool) {
109 _, ok = retryableS3Codes[s3Code]
110 return ok
111}
112
113// List of HTTP status codes which are retryable.
114var retryableHTTPStatusCodes = map[int]struct{}{
115 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
116 499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
117 http.StatusInternalServerError: {},
118 http.StatusBadGateway: {},
119 http.StatusServiceUnavailable: {},
120 http.StatusGatewayTimeout: {},
121 // Add more HTTP status codes here.
122}
123
124// isHTTPStatusRetryable - is HTTP error code retryable.
125func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
126 _, ok = retryableHTTPStatusCodes[httpStatusCode]
127 return ok
128}
129
130// For now, all http Do() requests are retryable except some well-defined errors.
131func isRequestErrorRetryable(err error) bool {
132 if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
133 return false
134 }
135 if ue, ok := err.(*url.Error); ok {
136 e := ue.Unwrap()
137 switch e.(type) {
138 // x509: certificate signed by unknown authority
139 case x509.UnknownAuthorityError:
140 return false
141 }
142 switch e.Error() {
143 case "http: server gave HTTP response to HTTPS client":
144 return false
145 }
146 }
147 return true
148}
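A self-contained sketch of the consuming side of such an attempt channel; `retryTimer` below is a simplified stand-in (fixed delay, no jitter) for the unexported `newRetryTimer`, and the failure/success logic is purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// retryTimer yields attempt numbers, sleeping a fixed delay
// between them, and stops on context cancellation.
func retryTimer(ctx context.Context, maxRetry int, delay time.Duration) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for i := 0; i < maxRetry; i++ {
			select {
			case attemptCh <- i + 1:
			case <-ctx.Done():
				return
			}
			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return
			}
		}
	}()
	return attemptCh
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	for attempt := range retryTimer(ctx, 5, 200*time.Millisecond) {
		// A real caller would issue the request here and consult
		// isRequestErrorRetryable and the retryable status/S3-code
		// tables above before deciding to continue.
		if attempt >= 3 {
			fmt.Println("succeeded on attempt", attempt)
			return
		}
		fmt.Println("attempt", attempt, "failed, retrying")
	}
	fmt.Println("giving up")
}
```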
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 0000000..b1de7b6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,64 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20// awsS3EndpointMap Amazon S3 endpoint map.
21var awsS3EndpointMap = map[string]string{
22 "us-east-1": "s3.dualstack.us-east-1.amazonaws.com",
23 "us-east-2": "s3.dualstack.us-east-2.amazonaws.com",
24 "us-west-2": "s3.dualstack.us-west-2.amazonaws.com",
25 "us-west-1": "s3.dualstack.us-west-1.amazonaws.com",
26 "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com",
27 "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com",
28 "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com",
29 "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com",
30 "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com",
31 "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com",
32 "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com",
33 "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com",
34 "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com",
35 "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com",
36 "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com",
37 "ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com",
38 "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
39 "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
40 "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
41 "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com",
42 "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com",
43 "af-south-1": "s3.dualstack.af-south-1.amazonaws.com",
44 "me-central-1": "s3.dualstack.me-central-1.amazonaws.com",
45 "me-south-1": "s3.dualstack.me-south-1.amazonaws.com",
46 "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com",
47 "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com",
48 "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com",
49 "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn",
50 "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
51 "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
52 "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
53 "il-central-1": "s3.dualstack.il-central-1.amazonaws.com",
54}
55
56// getS3Endpoint gets the Amazon S3 endpoint based on the bucket location.
57func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
58 s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
59 if !ok {
60 // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint.
61 s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com"
62 }
63 return s3Endpoint
64}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
new file mode 100644
index 0000000..f365157
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-error.go
@@ -0,0 +1,61 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20// Non-exhaustive list of AWS S3 standard error responses -
21// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
22var s3ErrorResponseMap = map[string]string{
23 "AccessDenied": "Access Denied.",
24 "BadDigest": "The Content-Md5 you specified did not match what we received.",
25 "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
26 "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
27 "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
28 "InternalError": "We encountered an internal error, please try again.",
29 "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
30 "InvalidBucketName": "The specified bucket is not valid.",
31 "InvalidDigest": "The Content-Md5 you specified is not valid.",
32 "InvalidRange": "The requested range is not satisfiable",
33 "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
34 "MissingContentLength": "You must provide the Content-Length HTTP header.",
35 "MissingContentMD5": "Missing required header for this request: Content-Md5.",
36 "MissingRequestBodyError": "Request body is empty.",
37 "NoSuchBucket": "The specified bucket does not exist.",
38 "NoSuchBucketPolicy": "The bucket policy does not exist",
39 "NoSuchKey": "The specified key does not exist.",
40 "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
41 "NotImplemented": "A header you provided implies functionality that is not implemented",
42 "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
43 "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
44 "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
45 "MethodNotAllowed": "The specified method is not allowed against this resource.",
46 "InvalidPart": "One or more of the specified parts could not be found.",
47 "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
48 "InvalidObjectState": "The operation is not valid for the current state of the object.",
49 "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
50 "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
51 "BucketNotEmpty": "The bucket you tried to delete is not empty",
52 "AllAccessDisabled": "All access to this bucket has been disabled.",
53 "MalformedPolicy": "Policy has invalid resource.",
54 "MissingFields": "Missing fields in request.",
55 "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
56 "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
57 "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
58 "InvalidDuration": "Duration provided in the request is invalid.",
59 "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
60 // Add new API errors here.
61}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 0000000..1bff664
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,83 @@
1//go:build go1.7 || go1.8
2// +build go1.7 go1.8
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2017-2018 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package minio
22
23import (
24 "crypto/tls"
25 "crypto/x509"
26 "net"
27 "net/http"
28 "os"
29 "time"
30)
31
32// mustGetSystemCertPool - returns system CAs, or an empty pool in case of error (or on Windows).
33func mustGetSystemCertPool() *x509.CertPool {
34 pool, err := x509.SystemCertPool()
35 if err != nil {
36 return x509.NewCertPool()
37 }
38 return pool
39}
40
41// DefaultTransport - this default transport is similar to
42// http.DefaultTransport but with the additional DisableCompression
43// param set to true, to avoid decompressing content with 'gzip' encoding.
44var DefaultTransport = func(secure bool) (*http.Transport, error) {
45 tr := &http.Transport{
46 Proxy: http.ProxyFromEnvironment,
47 DialContext: (&net.Dialer{
48 Timeout: 30 * time.Second,
49 KeepAlive: 30 * time.Second,
50 }).DialContext,
51 MaxIdleConns: 256,
52 MaxIdleConnsPerHost: 16,
53 ResponseHeaderTimeout: time.Minute,
54 IdleConnTimeout: time.Minute,
55 TLSHandshakeTimeout: 10 * time.Second,
56 ExpectContinueTimeout: 10 * time.Second,
57 // Set this value so that the underlying transport round-tripper
58 // doesn't try to auto decode the body of objects with
59 // content-encoding set to `gzip`.
60 //
61 // Refer:
62 // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
63 DisableCompression: true,
64 }
65
66 if secure {
67 tr.TLSClientConfig = &tls.Config{
68 // Can't use SSLv3 because of POODLE and BEAST
69 // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
70 // Can't use TLSv1.1 because of RC4 cipher usage
71 MinVersion: tls.VersionTLS12,
72 }
73 if f := os.Getenv("SSL_CERT_FILE"); f != "" {
74 rootCAs := mustGetSystemCertPool()
75 data, err := os.ReadFile(f)
76 if err == nil {
77 rootCAs.AppendCertsFromPEM(data)
78 }
79 tr.TLSClientConfig.RootCAs = rootCAs
80 }
81 }
82 return tr, nil
83}
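`DefaultTransport` is exported, so it can back a plain `http.Client` or be wired into the MinIO client options; a minimal sketch, with placeholder endpoint and credentials:

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// secure=true adds the TLS 1.2+ client config shown above.
	tr, err := minio.DefaultTransport(true)
	if err != nil {
		log.Fatal(err)
	}

	// The transport can back a plain http.Client...
	httpClient := &http.Client{Transport: tr}
	_ = httpClient

	// ...or be handed to the MinIO client via Options.Transport
	// (endpoint and credentials are placeholders).
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:    true,
		Transport: tr,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```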
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 0000000..e39eba0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,693 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "crypto/md5"
23 fipssha256 "crypto/sha256"
24 "encoding/base64"
25 "encoding/hex"
26 "encoding/xml"
27 "errors"
28 "fmt"
29 "hash"
30 "io"
31 "math/rand"
32 "net"
33 "net/http"
34 "net/url"
35 "regexp"
36 "strconv"
37 "strings"
38 "sync"
39 "time"
40
41 md5simd "github.com/minio/md5-simd"
42 "github.com/minio/minio-go/v7/pkg/encrypt"
43 "github.com/minio/minio-go/v7/pkg/s3utils"
44 "github.com/minio/sha256-simd"
45)
46
47func trimEtag(etag string) string {
48 etag = strings.TrimPrefix(etag, "\"")
49 return strings.TrimSuffix(etag, "\"")
50}
51
52var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
53
54func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
55 if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
56 expTime, err := parseRFC7231Time(matches[1])
57 if err != nil {
58 return time.Time{}, ""
59 }
60 return expTime, matches[2]
61 }
62 return time.Time{}, ""
63}
64
65var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
66
67func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
68 matches := restoreRegex.FindStringSubmatch(restore)
69 if len(matches) != 4 {
70 return false, time.Time{}, errors.New("unexpected restore header")
71 }
72 ongoing, err = strconv.ParseBool(matches[1])
73 if err != nil {
74 return false, time.Time{}, err
75 }
76 if matches[3] != "" {
77 expTime, err = parseRFC7231Time(matches[3])
78 if err != nil {
79 return false, time.Time{}, err
80 }
81 }
82 return
83}
84
85// xmlDecoder decodes an XML body into the provided value.
86func xmlDecoder(body io.Reader, v interface{}) error {
87 d := xml.NewDecoder(body)
88 return d.Decode(v)
89}
90
91// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
92func sum256Hex(data []byte) string {
93 hash := newSHA256Hasher()
94 defer hash.Close()
95 hash.Write(data)
96 return hex.EncodeToString(hash.Sum(nil))
97}
98
99// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
100func sumMD5Base64(data []byte) string {
101 hash := newMd5Hasher()
102 defer hash.Close()
103 hash.Write(data)
104 return base64.StdEncoding.EncodeToString(hash.Sum(nil))
105}
106
107// getEndpointURL - construct a new endpoint.
108func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
109 // If secure is false, use 'http' scheme.
110 scheme := "https"
111 if !secure {
112 scheme = "http"
113 }
114
115 // Construct a secured endpoint URL.
116 endpointURLStr := scheme + "://" + endpoint
117 endpointURL, err := url.Parse(endpointURLStr)
118 if err != nil {
119 return nil, err
120 }
121
122 // Validate incoming endpoint URL.
123 if err := isValidEndpointURL(*endpointURL); err != nil {
124 return nil, err
125 }
126 return endpointURL, nil
127}
128
129// closeResponse closes a non-nil response along with its Body.
130// Convenient wrapper to drain any remaining data in the response body.
131//
132// This allows the golang http RoundTripper
133// to re-use the same connection for future requests.
134func closeResponse(resp *http.Response) {
135 // Callers should close resp.Body when done reading from it.
136 // If resp.Body is not closed, the Client's underlying RoundTripper
137 // (typically Transport) may not be able to re-use a persistent TCP
138 // connection to the server for a subsequent "keep-alive" request.
139 if resp != nil && resp.Body != nil {
140 // Drain any remaining Body and then close the connection.
141		// Without this, closing the connection would prevent re-using
142		// the same connection for future requests.
143 // - http://stackoverflow.com/a/17961593/4465767
144 io.Copy(io.Discard, resp.Body)
145 resp.Body.Close()
146 }
147}
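The same drain-then-close pattern, sketched for any `*http.Response` outside this package (the URL is a placeholder):

```go
package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	// Same pattern as closeResponse above: drain any remaining
	// body bytes, then close, so the keep-alive connection can
	// be reused by the transport.
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
}
```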
148
149var (
150 // Hex encoded string of nil sha256sum bytes.
151 emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
152
153 // Sentinel URL is the default url value which is invalid.
154 sentinelURL = url.URL{}
155)
156
157// Verify if input endpoint URL is valid.
158func isValidEndpointURL(endpointURL url.URL) error {
159 if endpointURL == sentinelURL {
160 return errInvalidArgument("Endpoint url cannot be empty.")
161 }
162 if endpointURL.Path != "/" && endpointURL.Path != "" {
163 return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
164 }
165 host := endpointURL.Hostname()
166 if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
167 msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
168 return errInvalidArgument(msg)
169 }
170
171 if strings.Contains(host, ".s3.amazonaws.com") {
172 if !s3utils.IsAmazonEndpoint(endpointURL) {
173 return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
174 }
175 }
176 if strings.Contains(host, ".googleapis.com") {
177 if !s3utils.IsGoogleEndpoint(endpointURL) {
178 return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
179 }
180 }
181 return nil
182}
183
184// Verify if input expires value is valid.
185func isValidExpiry(expires time.Duration) error {
186 expireSeconds := int64(expires / time.Second)
187 if expireSeconds < 1 {
188		return errInvalidArgument("Expires cannot be less than 1 second.")
189 }
190 if expireSeconds > 604800 {
191 return errInvalidArgument("Expires cannot be greater than 7 days.")
192 }
193 return nil
194}
195
196// Extract only necessary metadata header key/values by
197// filtering them out with a list of custom header keys.
198func extractObjMetadata(header http.Header) http.Header {
199 preserveKeys := []string{
200 "Content-Type",
201 "Cache-Control",
202 "Content-Encoding",
203 "Content-Language",
204 "Content-Disposition",
205 "X-Amz-Storage-Class",
206 "X-Amz-Object-Lock-Mode",
207 "X-Amz-Object-Lock-Retain-Until-Date",
208 "X-Amz-Object-Lock-Legal-Hold",
209 "X-Amz-Website-Redirect-Location",
210 "X-Amz-Server-Side-Encryption",
211 "X-Amz-Tagging-Count",
212 "X-Amz-Meta-",
213 // Add new headers to be preserved.
214 // if you add new headers here, please extend
215 // PutObjectOptions{} to preserve them
216 // upon upload as well.
217 }
218 filteredHeader := make(http.Header)
219 for k, v := range header {
220 var found bool
221 for _, prefix := range preserveKeys {
222 if !strings.HasPrefix(k, prefix) {
223 continue
224 }
225 found = true
226 break
227 }
228 if found {
229 filteredHeader[k] = v
230 }
231 }
232 return filteredHeader
233}
234
235const (
236	// RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
237 rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
238 rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
239 rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
240)
241
242func parseTime(t string, formats ...string) (time.Time, error) {
243 for _, format := range formats {
244 tt, err := time.Parse(format, t)
245 if err == nil {
246 return tt, nil
247 }
248 }
249 return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
250}
251
252func parseRFC7231Time(lastModified string) (time.Time, error) {
253 return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
254}
255
256// ToObjectInfo converts http header values into ObjectInfo type,
257// extracts metadata and fills in all the necessary fields in ObjectInfo.
258func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
259 var err error
260 // Trim off the odd double quotes from ETag in the beginning and end.
261 etag := trimEtag(h.Get("ETag"))
262
263	// Parse content length if it exists
264 var size int64 = -1
265 contentLengthStr := h.Get("Content-Length")
266 if contentLengthStr != "" {
267 size, err = strconv.ParseInt(contentLengthStr, 10, 64)
268 if err != nil {
269 // Content-Length is not valid
270 return ObjectInfo{}, ErrorResponse{
271 Code: "InternalError",
272 Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
273 BucketName: bucketName,
274 Key: objectName,
275 RequestID: h.Get("x-amz-request-id"),
276 HostID: h.Get("x-amz-id-2"),
277 Region: h.Get("x-amz-bucket-region"),
278 }
279 }
280 }
281
282	// Parse Last-Modified, which has http time format.
283 mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
284 if err != nil {
285 return ObjectInfo{}, ErrorResponse{
286 Code: "InternalError",
287 Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
288 BucketName: bucketName,
289 Key: objectName,
290 RequestID: h.Get("x-amz-request-id"),
291 HostID: h.Get("x-amz-id-2"),
292 Region: h.Get("x-amz-bucket-region"),
293 }
294 }
295
296 // Fetch content type if any present.
297 contentType := strings.TrimSpace(h.Get("Content-Type"))
298 if contentType == "" {
299 contentType = "application/octet-stream"
300 }
301
302 expiryStr := h.Get("Expires")
303 var expiry time.Time
304 if expiryStr != "" {
305 expiry, err = parseRFC7231Time(expiryStr)
306 if err != nil {
307 return ObjectInfo{}, ErrorResponse{
308 Code: "InternalError",
309 Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
310 BucketName: bucketName,
311 Key: objectName,
312 RequestID: h.Get("x-amz-request-id"),
313 HostID: h.Get("x-amz-id-2"),
314 Region: h.Get("x-amz-bucket-region"),
315 }
316 }
317 }
318
319 metadata := extractObjMetadata(h)
320 userMetadata := make(map[string]string)
321 for k, v := range metadata {
322 if strings.HasPrefix(k, "X-Amz-Meta-") {
323 userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
324 }
325 }
326 userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
327
328 var tagCount int
329 if count := h.Get(amzTaggingCount); count != "" {
330 tagCount, err = strconv.Atoi(count)
331 if err != nil {
332 return ObjectInfo{}, ErrorResponse{
333 Code: "InternalError",
334 Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
335 BucketName: bucketName,
336 Key: objectName,
337 RequestID: h.Get("x-amz-request-id"),
338 HostID: h.Get("x-amz-id-2"),
339 Region: h.Get("x-amz-bucket-region"),
340 }
341 }
342 }
343
344 // Nil if not found
345 var restore *RestoreInfo
346 if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
347 ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
348 if err != nil {
349 return ObjectInfo{}, err
350 }
351 restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
352 }
353
354 // extract lifecycle expiry date and rule ID
355 expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
356
357 deleteMarker := h.Get(amzDeleteMarker) == "true"
358
359 // Save object metadata info.
360 return ObjectInfo{
361 ETag: etag,
362 Key: objectName,
363 Size: size,
364 LastModified: mtime,
365 ContentType: contentType,
366 Expires: expiry,
367 VersionID: h.Get(amzVersionID),
368 IsDeleteMarker: deleteMarker,
369 ReplicationStatus: h.Get(amzReplicationStatus),
370 Expiration: expTime,
371 ExpirationRuleID: ruleID,
372 // Extract only the relevant header keys describing the object.
373		// extractObjMetadata above filters out the headers
374		// which are not part of the object metadata.
375 Metadata: metadata,
376 UserMetadata: userMetadata,
377 UserTags: userTags,
378 UserTagCount: tagCount,
379 Restore: restore,
380
381 // Checksum values
382 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
383 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
384 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
385 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
386 }, nil
387}
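`ToObjectInfo` is exported in this copy, so it can be exercised directly with synthetic response headers; all values below are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/minio/minio-go/v7"
)

func main() {
	// Synthetic headers, as a HEAD/GET object response would carry.
	h := http.Header{}
	h.Set("ETag", `"9b2cf535f27731c974343645a3985328"`)
	h.Set("Content-Length", "1024")
	h.Set("Content-Type", "text/plain")
	h.Set("Last-Modified", "Tue, 29 Apr 2014 18:30:38 GMT")
	h.Set("X-Amz-Meta-Owner", "alice")

	info, err := minio.ToObjectInfo("my-bucket", "my-object", h)
	if err != nil {
		log.Fatal(err)
	}
	// The quotes are trimmed from the ETag, the size parsed, and the
	// X-Amz-Meta- prefix stripped into UserMetadata.
	fmt.Println(info.ETag, info.Size, info.UserMetadata["Owner"])
}
```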
388
389var readFull = func(r io.Reader, buf []byte) (n int, err error) {
390 // ReadFull reads exactly len(buf) bytes from r into buf.
391 // It returns the number of bytes copied and an error if
392 // fewer bytes were read. The error is EOF only if no bytes
393 // were read. If an EOF happens after reading some but not
394 // all the bytes, ReadFull returns ErrUnexpectedEOF.
395 // On return, n == len(buf) if and only if err == nil.
396 // If r returns an error having read at least len(buf) bytes,
397 // the error is dropped.
398 for n < len(buf) && err == nil {
399 var nn int
400 nn, err = r.Read(buf[n:])
401		// Some spurious io.Readers return
402		// io.ErrUnexpectedEOF when nn == 0.
403		// This behavior is undocumented, so
404		// we deliberately do not use the
405		// stock io.ReadFull implementation,
406		// which would force custom handling
407		// in every caller. Instead we slightly
408		// modify the original io.ReadFull
409		// logic: io.ErrUnexpectedEOF with
410		// nn == 0 really means io.EOF.
411 if err == io.ErrUnexpectedEOF && nn == 0 {
412 err = io.EOF
413 }
414 n += nn
415 }
416 if n >= len(buf) {
417 err = nil
418 } else if n > 0 && err == io.EOF {
419 err = io.ErrUnexpectedEOF
420 }
421 return
422}
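To see the one behavioral difference versus the stock `io.ReadFull`, consider a misbehaving reader that returns `(0, io.ErrUnexpectedEOF)`; `spuriousReader` below is a hypothetical illustration:

```go
package main

import (
	"fmt"
	"io"
)

// spuriousReader yields its payload, then returns the undocumented
// (0, io.ErrUnexpectedEOF) instead of (0, io.EOF).
type spuriousReader struct{ data []byte }

func (r *spuriousReader) Read(p []byte) (int, error) {
	if len(r.data) == 0 {
		return 0, io.ErrUnexpectedEOF
	}
	n := copy(p, r.data)
	r.data = r.data[n:]
	return n, nil
}

func main() {
	buf := make([]byte, 8)
	n, err := io.ReadFull(&spuriousReader{}, buf)
	// Stock io.ReadFull reports (0, io.ErrUnexpectedEOF) for this
	// misbehaving empty reader; the modified readFull above first
	// normalizes the spurious error to io.EOF, so it would report
	// (0, io.EOF) — i.e. "no bytes were read" — instead.
	fmt.Println(n, err)
}
```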
423
424// regCred matches credential string in HTTP header
425var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
426
427// regSign matches signature string in HTTP header
428var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
429
430// Redact out signature value from authorization string.
431func redactSignature(origAuth string) string {
432 if !strings.HasPrefix(origAuth, signV4Algorithm) {
433 // Set a temporary redacted auth
434 return "AWS **REDACTED**:**REDACTED**"
435 }
436
437 // Signature V4 authorization header.
438
439 // Strip out accessKeyID from:
440 // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
441 newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
442
443 // Strip out 256-bit signature from: Signature=<256-bit signature>
444 return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
445}
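A standalone sketch of the same two-step redaction; the regexes mirror regCred/regSign above, and the authorization header value is made up:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	credRe = regexp.MustCompile("Credential=([A-Z0-9]+)/")
	signRe = regexp.MustCompile("Signature=([0-9a-f]+)")
)

func main() {
	// Illustrative V4 authorization header (values are made up).
	auth := "AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20231229/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-date, Signature=0123456789abcdef"

	if strings.HasPrefix(auth, "AWS4-HMAC-SHA256") {
		auth = credRe.ReplaceAllString(auth, "Credential=**REDACTED**/")
		auth = signRe.ReplaceAllString(auth, "Signature=**REDACTED**")
	}
	fmt.Println(auth)
}
```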
446
447// getDefaultLocation returns the location based on the input
448// URL `u`. If a region override is provided then the location
449// always defaults to regionOverride.
450//
451// If no other cases match then the location is set to `us-east-1`
452// as a last resort.
453func getDefaultLocation(u url.URL, regionOverride string) (location string) {
454 if regionOverride != "" {
455 return regionOverride
456 }
457 region := s3utils.GetRegionFromURL(u)
458 if region == "" {
459 region = "us-east-1"
460 }
461 return region
462}
463
464var supportedHeaders = map[string]bool{
465 "content-type": true,
466 "cache-control": true,
467 "content-encoding": true,
468 "content-disposition": true,
469 "content-language": true,
470 "x-amz-website-redirect-location": true,
471 "x-amz-object-lock-mode": true,
472 "x-amz-metadata-directive": true,
473 "x-amz-object-lock-retain-until-date": true,
474 "expires": true,
475 "x-amz-replication-status": true,
476 // Add more supported headers here.
477 // Must be lower case.
478}
479
480// isStorageClassHeader returns true if the header is a supported storage class header
481func isStorageClassHeader(headerKey string) bool {
482 return strings.EqualFold(amzStorageClass, headerKey)
483}
484
485// isStandardHeader returns true if header is a supported header and not a custom header
486func isStandardHeader(headerKey string) bool {
487 return supportedHeaders[strings.ToLower(headerKey)]
488}
489
490// sseHeaders is list of server side encryption headers
491var sseHeaders = map[string]bool{
492 "x-amz-server-side-encryption": true,
493 "x-amz-server-side-encryption-aws-kms-key-id": true,
494 "x-amz-server-side-encryption-context": true,
495 "x-amz-server-side-encryption-customer-algorithm": true,
496 "x-amz-server-side-encryption-customer-key": true,
497 "x-amz-server-side-encryption-customer-key-md5": true,
498 // Add more supported headers here.
499 // Must be lower case.
500}
501
502// isSSEHeader returns true if header is a server side encryption header.
503func isSSEHeader(headerKey string) bool {
504 return sseHeaders[strings.ToLower(headerKey)]
505}
506
507// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
508func isAmzHeader(headerKey string) bool {
509 key := strings.ToLower(headerKey)
510
511 return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
512}
513
514// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
515var supportedQueryValues = map[string]bool{
516 "partNumber": true,
517 "versionId": true,
518 "response-cache-control": true,
519 "response-content-disposition": true,
520 "response-content-encoding": true,
521 "response-content-language": true,
522 "response-content-type": true,
523 "response-expires": true,
524}
525
526// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
527func isStandardQueryValue(qsKey string) bool {
528 return supportedQueryValues[qsKey]
529}
530
531// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
532// set of query params starting with "x-" are ignored by S3.
533const allowedCustomQueryPrefix = "x-"
534
535func isCustomQueryValue(qsKey string) bool {
536 return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
537}
538
539var (
540 md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
541 sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
542)
543
544func newMd5Hasher() md5simd.Hasher {
545 return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
546}
547
548func newSHA256Hasher() md5simd.Hasher {
549 if encrypt.FIPS {
550 return &hashWrapper{Hash: fipssha256.New(), isSHA256: true}
551 }
552 return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
553}
554
555// hashWrapper implements the md5simd.Hasher interface.
556type hashWrapper struct {
557 hash.Hash
558 isMD5 bool
559 isSHA256 bool
560}
561
562// Close will put the hasher back into the pool.
563func (m *hashWrapper) Close() {
564 if m.isMD5 && m.Hash != nil {
565 m.Reset()
566 md5Pool.Put(m.Hash)
567 }
568 if m.isSHA256 && m.Hash != nil {
569 m.Reset()
570 sha256Pool.Put(m.Hash)
571 }
572 m.Hash = nil
573}
574
575const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
576const (
577 letterIdxBits = 6 // 6 bits to represent a letter index
578 letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
579 letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
580)
581
582// randString generates random names and prepends them with a known prefix.
583func randString(n int, src rand.Source, prefix string) string {
584 b := make([]byte, n)
585 // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
586 for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
587 if remain == 0 {
588 cache, remain = src.Int63(), letterIdxMax
589 }
590 if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
591 b[i] = letterBytes[idx]
592 i--
593 }
594 cache >>= letterIdxBits
595 remain--
596 }
597 return prefix + string(b[0:30-len(prefix)])
598}
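A sketch of the same bit-cache trick outside this package; `randName` below is a hypothetical stand-in for the unexported `randString` (it returns all n generated letters rather than truncating to 30 characters):

```go
package main

import (
	"fmt"
	"math/rand"
)

const letters = "abcdefghijklmnopqrstuvwxyz0123456789"

// randName shows the trick used by randString above: one Int63 call
// yields 63 random bits, consumed 6 bits at a time, so a single RNG
// call covers up to 10 letter indices.
func randName(n int, src rand.Source, prefix string) string {
	const idxBits = 6
	const idxMask = 1<<idxBits - 1
	const idxMax = 63 / idxBits

	b := make([]byte, n)
	for i, cache, remain := n-1, src.Int63(), idxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), idxMax
		}
		if idx := int(cache & idxMask); idx < len(letters) {
			b[i] = letters[idx]
			i--
		}
		cache >>= idxBits
		remain--
	}
	return prefix + string(b)
}

func main() {
	fmt.Println(randName(16, rand.NewSource(42), "tmp-"))
}
```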
599
600// IsNetworkOrHostDown - if there was a network error or if the host is down.
601// expectTimeouts indicates that *context* timeouts are expected and do not
602// indicate a downed host. Other timeouts are still treated as a downed host.
603func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
604 if err == nil {
605 return false
606 }
607
608 if errors.Is(err, context.Canceled) {
609 return false
610 }
611
612 if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
613 return false
614 }
615
616 if errors.Is(err, context.DeadlineExceeded) {
617 return true
618 }
619
620	// We need to figure out whether the error is a timeout
621	// or a non-temporary error.
622 urlErr := &url.Error{}
623 if errors.As(err, &urlErr) {
624 switch urlErr.Err.(type) {
625 case *net.DNSError, *net.OpError, net.UnknownNetworkError:
626 return true
627 }
628 }
629 var e net.Error
630 if errors.As(err, &e) {
631 if e.Timeout() {
632 return true
633 }
634 }
635
636 // Fallback to other mechanisms.
637 switch {
638 case strings.Contains(err.Error(), "Connection closed by foreign host"):
639 return true
640 case strings.Contains(err.Error(), "TLS handshake timeout"):
641 // If error is - tlsHandshakeTimeoutError.
642 return true
643 case strings.Contains(err.Error(), "i/o timeout"):
644 // If error is - tcp timeoutError.
645 return true
646 case strings.Contains(err.Error(), "connection timed out"):
647 // If err is a net.Dial timeout.
648 return true
649 case strings.Contains(err.Error(), "connection refused"):
650 // If err is connection refused
651 return true
652
653 case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
654 // Denial errors
655 return true
656 }
657 return false
658}
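`IsNetworkOrHostDown` is exported; a short sketch of its classification behavior (the wrapped error below is constructed by hand, not a real dial failure):

```go
package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7"
)

func main() {
	// Context cancellation is never treated as a downed host.
	fmt.Println(minio.IsNetworkOrHostDown(context.Canceled, false)) // false

	// A wrapped "connection refused" style error trips the
	// string-based fallback checks above.
	err := &url.Error{
		Op:  "Get",
		URL: "http://127.0.0.1:9/",
		Err: fmt.Errorf("dial tcp 127.0.0.1:9: connection refused"),
	}
	fmt.Println(minio.IsNetworkOrHostDown(err, false)) // true
}
```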
659
660// newHashReaderWrapper will hash all reads done through r.
661// When r returns io.EOF the done function will be called with the sum.
662func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
663 return &hashReaderWrapper{
664 r: r,
665 h: h,
666 done: done,
667 }
668}
669
670type hashReaderWrapper struct {
671 r io.Reader
672 h hash.Hash
673 done func(hash []byte)
674}
675
676// Read implements the io.Reader interface.
677func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
678 n, err = h.r.Read(p)
679 if n > 0 {
680 n2, err := h.h.Write(p[:n])
681 if err != nil {
682 return 0, err
683 }
684 if n2 != n {
685 return 0, io.ErrShortWrite
686 }
687 }
688 if err == io.EOF {
689 // Call back
690 h.done(h.h.Sum(nil))
691 }
692 return n, err
693}
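`hashReaderWrapper` is unexported, but the pattern is easy to mirror; a self-contained sketch with crypto/sha256 standing in for the hash:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashingReader mirrors hashReaderWrapper above: it hashes every
// read and invokes done with the digest once io.EOF is seen.
type hashingReader struct {
	r    io.Reader
	h    hash.Hash
	done func(sum []byte)
}

func (h *hashingReader) Read(p []byte) (int, error) {
	n, err := h.r.Read(p)
	if n > 0 {
		h.h.Write(p[:n]) // hash.Hash writes never return an error
	}
	if err == io.EOF {
		h.done(h.h.Sum(nil))
	}
	return n, err
}

func main() {
	src := strings.NewReader("hello, world")
	hr := &hashingReader{r: src, h: sha256.New(), done: func(sum []byte) {
		fmt.Println("sha256:", hex.EncodeToString(sum))
	}}
	// Draining the wrapper triggers the callback at EOF.
	io.Copy(io.Discard, hr)
}
```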
diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore
new file mode 100644
index 0000000..c56069f
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/.gitignore
@@ -0,0 +1 @@
*.test \ No newline at end of file
diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md
new file mode 100644
index 0000000..6117488
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/README.md
@@ -0,0 +1,137 @@
1# sha256-simd
2
3Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions for x86 and ARM64 for ARM.
4On AVX512 it provides an up to 8x improvement (over 3 GB/s per core).
5SHA Extensions give a performance boost of close to 4x over native.
6
7## Introduction
8
9This package is designed as a replacement for `crypto/sha256`.
10For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement.
11
12This package uses Golang assembly.
13The AVX512 version is based on Intel's "multi-buffer crypto library for IPSec", whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al.
14
15## Support for Intel SHA Extensions
16
17Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)).
18
19```
20$ benchcmp avx2.txt sha-ext.txt
21benchmark AVX2 MB/s SHA Ext MB/s speedup
22BenchmarkHash5M 514.40 1975.17 3.84x
23```
24
25Thanks to Kristofer Peterson, we also added further performance changes such as optimized padding and
26endian conversions, which sped up all implementations: the Intel SHA code alone doubled performance for small sizes,
27while the other changes increased everything by roughly 50%.
28
29## Support for AVX512
30
31We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU):
32
33```
34$ benchcmp avx2.txt avx512.txt
35benchmark AVX2 MB/s AVX512 MB/s speedup
36BenchmarkHash5M 448.62 3498.20 7.80x
37```
38
39The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide).
40
41Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message.
42
43Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice.
44
45Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion:
46
47```go
48import "github.com/minio/sha256-simd"
49
50func main() {
51 server := sha256.NewAvx512Server()
52 h512 := sha256.NewAvx512(server)
53 h512.Write(fileBlock)
54 digest := h512.Sum([]byte{})
55}
56```
57
58Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance:
59* Have many go routines doing SHA256 calculations in parallel.
60* Try to Write() messages in multiples of 64 bytes.
61* Try to keep the overall length of messages to a roughly similar size, i.e. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible).
62
63More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores.
64
65## Drop-In Replacement
66
67The following code snippet shows how you can use `github.com/minio/sha256-simd`.
68This will automatically select the fastest method for the architecture on which it will be executed.
69
70```go
71import "github.com/minio/sha256-simd"
72
73func main() {
74 ...
75 shaWriter := sha256.New()
76 io.Copy(shaWriter, file)
77 ...
78}
79```
80
81## Performance
82
83Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB.
84
85| Processor | SIMD | Speed (MB/s) |
86| --------------------------------- | ------- | ------------:|
87| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 |
88| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 |
89| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 |
90
91## asm2plan9s
92
93In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information.
94
95## Why and benefits
96
97One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is the calculation of SHA256 hash sums. For instance, during multipart uploads, each uploaded part needs to be verified for data integrity by the server.
98
99Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc.
100
101## ARM SHA Extensions
102
103The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)).
104
105 ```
106 sha256h q2, q3, v9.4s
107 sha256h2 q3, q4, v9.4s
108 sha256su0 v5.4s, v6.4s
109 rev32 v8.16b, v8.16b
110 add v9.4s, v7.4s, v18.4s
111 mov v4.16b, v2.16b
112 sha256h q2, q3, v10.4s
113 sha256h2 q3, q4, v10.4s
114 sha256su0 v6.4s, v7.4s
115 sha256su1 v5.4s, v7.4s, v8.4s
116 ```
117
118### Detailed benchmarks
119
120Benchmarks generated on a 1.2 GHz quad-core ARM Cortex-A53 equipped [Pine64](https://www.pine64.com/).
121
122```
123minio@minio-arm:$ benchcmp golang.txt arm64.txt
124benchmark golang arm64 speedup
125BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x
126BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x
127BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x
128BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x
129```
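
These numbers can be reproduced with Go's standard benchmarking support; a sketch matching the benchmark names above (the `benchmarkHash` helper is hypothetical):

```go
package sha256

import "testing"

// benchmarkHash measures throughput for hashing size-byte messages.
func benchmarkHash(b *testing.B, size int) {
	data := make([]byte, size)
	b.SetBytes(int64(size))
	for i := 0; i < b.N; i++ {
		Sum256(data)
	}
}

func BenchmarkHash8Bytes(b *testing.B) { benchmarkHash(b, 8) }
func BenchmarkHash1K(b *testing.B)     { benchmarkHash(b, 1024) }
func BenchmarkHash8K(b *testing.B)     { benchmarkHash(b, 8*1024) }
func BenchmarkHash1M(b *testing.B)     { benchmarkHash(b, 1024*1024) }
```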
130
131## License
132
133Released under the Apache License v2.0. You can find the complete text in the file LICENSE.
134
135## Contributing
136
137Contributions are welcome; please send PRs for any enhancements.
diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go
new file mode 100644
index 0000000..97af6a1
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go
@@ -0,0 +1,50 @@
1// Minio Cloud Storage, (C) 2021 Minio, Inc.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14//
15
16package sha256
17
18import (
19 "bytes"
20 "io/ioutil"
21 "runtime"
22
23 "github.com/klauspost/cpuid/v2"
24)
25
26var (
27 hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
28 hasAvx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
29)
30
31func hasArmSha2() bool {
32 if cpuid.CPU.Has(cpuid.SHA2) {
33 return true
34 }
35 if runtime.GOARCH != "arm64" || runtime.GOOS != "linux" {
36 return false
37 }
38
39 // Fall back to hacky cpuinfo parsing...
40 const procCPUInfo = "/proc/cpuinfo"
41
42 // Feature to check for.
43 const sha256Feature = "sha2"
44
45 cpuInfo, err := ioutil.ReadFile(procCPUInfo)
46 if err != nil {
47 return false
48 }
49 return bytes.Contains(cpuInfo, []byte(sha256Feature))
50}
diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go
new file mode 100644
index 0000000..f146bbd
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256.go
@@ -0,0 +1,468 @@
1/*
2 * Minio Cloud Storage, (C) 2016 Minio, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package sha256
18
19import (
20 "crypto/sha256"
21 "encoding/binary"
22 "errors"
23 "hash"
24)
25
26// Size - The size of a SHA256 checksum in bytes.
27const Size = 32
28
29// BlockSize - The blocksize of SHA256 in bytes.
30const BlockSize = 64
31
32const (
33 chunk = BlockSize
34 init0 = 0x6A09E667
35 init1 = 0xBB67AE85
36 init2 = 0x3C6EF372
37 init3 = 0xA54FF53A
38 init4 = 0x510E527F
39 init5 = 0x9B05688C
40 init6 = 0x1F83D9AB
41 init7 = 0x5BE0CD19
42)
43
44// digest represents the partial evaluation of a checksum.
45type digest struct {
46 h [8]uint32
47 x [chunk]byte
48 nx int
49 len uint64
50}
51
52// Reset digest back to default
53func (d *digest) Reset() {
54 d.h[0] = init0
55 d.h[1] = init1
56 d.h[2] = init2
57 d.h[3] = init3
58 d.h[4] = init4
59 d.h[5] = init5
60 d.h[6] = init6
61 d.h[7] = init7
62 d.nx = 0
63 d.len = 0
64}
65
66type blockfuncType int
67
68const (
69 blockfuncStdlib blockfuncType = iota
70 blockfuncIntelSha
71 blockfuncArmSha2
72 blockfuncForceGeneric = -1
73)
74
75var blockfunc blockfuncType
76
77func init() {
78 switch {
79 case hasIntelSha:
80 blockfunc = blockfuncIntelSha
81 case hasArmSha2():
82 blockfunc = blockfuncArmSha2
83 }
84}
85
86// New returns a new hash.Hash computing the SHA256 checksum.
87func New() hash.Hash {
88 if blockfunc == blockfuncStdlib {
89 // Fallback to the standard golang implementation
90 // if no features were found.
91 return sha256.New()
92 }
93
94 d := new(digest)
95 d.Reset()
96 return d
97}
98
99// Sum256 - single caller sha256 helper
100func Sum256(data []byte) (result [Size]byte) {
101 var d digest
102 d.Reset()
103 d.Write(data)
104 result = d.checkSum()
105 return
106}
107
108// Return size of checksum
109func (d *digest) Size() int { return Size }
110
111// Return blocksize of checksum
112func (d *digest) BlockSize() int { return BlockSize }
113
114// Write to digest
115func (d *digest) Write(p []byte) (nn int, err error) {
116 nn = len(p)
117 d.len += uint64(nn)
118 if d.nx > 0 {
119 n := copy(d.x[d.nx:], p)
120 d.nx += n
121 if d.nx == chunk {
122 block(d, d.x[:])
123 d.nx = 0
124 }
125 p = p[n:]
126 }
127 if len(p) >= chunk {
128 n := len(p) &^ (chunk - 1)
129 block(d, p[:n])
130 p = p[n:]
131 }
132 if len(p) > 0 {
133 d.nx = copy(d.x[:], p)
134 }
135 return
136}
137
138// Return sha256 sum in bytes
139func (d *digest) Sum(in []byte) []byte {
140 // Make a copy of d0 so that caller can keep writing and summing.
141 d0 := *d
142 hash := d0.checkSum()
143 return append(in, hash[:]...)
144}
145
146// Intermediate checksum function
147func (d *digest) checkSum() (digest [Size]byte) {
148 n := d.nx
149
150 var k [64]byte
151 copy(k[:], d.x[:n])
152
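	// SHA-256 padding: a single 0x80 byte follows the message bytes.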
153 k[n] = 0x80
154
155 if n >= 56 {
156 block(d, k[:])
157
158 // clear block buffer - go compiles this to optimal 1x xorps + 4x movups
159 // unfortunately expressing this more succinctly results in much worse code
160 k[0] = 0
161 k[1] = 0
162 k[2] = 0
163 k[3] = 0
164 k[4] = 0
165 k[5] = 0
166 k[6] = 0
167 k[7] = 0
168 k[8] = 0
169 k[9] = 0
170 k[10] = 0
171 k[11] = 0
172 k[12] = 0
173 k[13] = 0
174 k[14] = 0
175 k[15] = 0
176 k[16] = 0
177 k[17] = 0
178 k[18] = 0
179 k[19] = 0
180 k[20] = 0
181 k[21] = 0
182 k[22] = 0
183 k[23] = 0
184 k[24] = 0
185 k[25] = 0
186 k[26] = 0
187 k[27] = 0
188 k[28] = 0
189 k[29] = 0
190 k[30] = 0
191 k[31] = 0
192 k[32] = 0
193 k[33] = 0
194 k[34] = 0
195 k[35] = 0
196 k[36] = 0
197 k[37] = 0
198 k[38] = 0
199 k[39] = 0
200 k[40] = 0
201 k[41] = 0
202 k[42] = 0
203 k[43] = 0
204 k[44] = 0
205 k[45] = 0
206 k[46] = 0
207 k[47] = 0
208 k[48] = 0
209 k[49] = 0
210 k[50] = 0
211 k[51] = 0
212 k[52] = 0
213 k[53] = 0
214 k[54] = 0
215 k[55] = 0
216 k[56] = 0
217 k[57] = 0
218 k[58] = 0
219 k[59] = 0
220 k[60] = 0
221 k[61] = 0
222 k[62] = 0
223 k[63] = 0
224 }
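	// Finish the padding with the total message length in bits, big-endian, in the last 8 bytes.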
225 binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3)
226 block(d, k[:])
227
228 {
229 const i = 0
230 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
231 }
232 {
233 const i = 1
234 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
235 }
236 {
237 const i = 2
238 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
239 }
240 {
241 const i = 3
242 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
243 }
244 {
245 const i = 4
246 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
247 }
248 {
249 const i = 5
250 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
251 }
252 {
253 const i = 6
254 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
255 }
256 {
257 const i = 7
258 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
259 }
260
261 return
262}
263
264func block(dig *digest, p []byte) {
265 if blockfunc == blockfuncIntelSha {
266 blockIntelShaGo(dig, p)
267 } else if blockfunc == blockfuncArmSha2 {
268 blockArmSha2Go(dig, p)
269 } else {
270 blockGeneric(dig, p)
271 }
272}
273
274func blockGeneric(dig *digest, p []byte) {
275 var w [64]uint32
276 h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
277 for len(p) >= chunk {
278 // Can interlace the computation of w with the
279 // rounds below if needed for speed.
280 for i := 0; i < 16; i++ {
281 j := i * 4
282 w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
283 }
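		// Expand the first 16 words into the full 64-word message schedule.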
284 for i := 16; i < 64; i++ {
285 v1 := w[i-2]
286 t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10)
287 v2 := w[i-15]
288 t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3)
289 w[i] = t1 + w[i-7] + t2 + w[i-16]
290 }
291
292 a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
293
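		// Apply the 64 rounds of the SHA-256 compression function.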
294 for i := 0; i < 64; i++ {
295 t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
296
297 t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c))
298
299 h = g
300 g = f
301 f = e
302 e = d + t1
303 d = c
304 c = b
305 b = a
306 a = t1 + t2
307 }
308
309 h0 += a
310 h1 += b
311 h2 += c
312 h3 += d
313 h4 += e
314 h5 += f
315 h6 += g
316 h7 += h
317
318 p = p[chunk:]
319 }
320
321 dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
322}
323
324var _K = []uint32{
325 0x428a2f98,
326 0x71374491,
327 0xb5c0fbcf,
328 0xe9b5dba5,
329 0x3956c25b,
330 0x59f111f1,
331 0x923f82a4,
332 0xab1c5ed5,
333 0xd807aa98,
334 0x12835b01,
335 0x243185be,
336 0x550c7dc3,
337 0x72be5d74,
338 0x80deb1fe,
339 0x9bdc06a7,
340 0xc19bf174,
341 0xe49b69c1,
342 0xefbe4786,
343 0x0fc19dc6,
344 0x240ca1cc,
345 0x2de92c6f,
346 0x4a7484aa,
347 0x5cb0a9dc,
348 0x76f988da,
349 0x983e5152,
350 0xa831c66d,
351 0xb00327c8,
352 0xbf597fc7,
353 0xc6e00bf3,
354 0xd5a79147,
355 0x06ca6351,
356 0x14292967,
357 0x27b70a85,
358 0x2e1b2138,
359 0x4d2c6dfc,
360 0x53380d13,
361 0x650a7354,
362 0x766a0abb,
363 0x81c2c92e,
364 0x92722c85,
365 0xa2bfe8a1,
366 0xa81a664b,
367 0xc24b8b70,
368 0xc76c51a3,
369 0xd192e819,
370 0xd6990624,
371 0xf40e3585,
372 0x106aa070,
373 0x19a4c116,
374 0x1e376c08,
375 0x2748774c,
376 0x34b0bcb5,
377 0x391c0cb3,
378 0x4ed8aa4a,
379 0x5b9cca4f,
380 0x682e6ff3,
381 0x748f82ee,
382 0x78a5636f,
383 0x84c87814,
384 0x8cc70208,
385 0x90befffa,
386 0xa4506ceb,
387 0xbef9a3f7,
388 0xc67178f2,
389}
390
391const (
392 magic256 = "sha\x03"
393 marshaledSize = len(magic256) + 8*4 + chunk + 8
394)
395
396func (d *digest) MarshalBinary() ([]byte, error) {
397 b := make([]byte, 0, marshaledSize)
398 b = append(b, magic256...)
399 b = appendUint32(b, d.h[0])
400 b = appendUint32(b, d.h[1])
401 b = appendUint32(b, d.h[2])
402 b = appendUint32(b, d.h[3])
403 b = appendUint32(b, d.h[4])
404 b = appendUint32(b, d.h[5])
405 b = appendUint32(b, d.h[6])
406 b = appendUint32(b, d.h[7])
407 b = append(b, d.x[:d.nx]...)
408 b = b[:len(b)+len(d.x)-d.nx] // already zero
409 b = appendUint64(b, d.len)
410 return b, nil
411}
412
413func (d *digest) UnmarshalBinary(b []byte) error {
414 if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
415 return errors.New("crypto/sha256: invalid hash state identifier")
416 }
417 if len(b) != marshaledSize {
418 return errors.New("crypto/sha256: invalid hash state size")
419 }
420 b = b[len(magic256):]
421 b, d.h[0] = consumeUint32(b)
422 b, d.h[1] = consumeUint32(b)
423 b, d.h[2] = consumeUint32(b)
424 b, d.h[3] = consumeUint32(b)
425 b, d.h[4] = consumeUint32(b)
426 b, d.h[5] = consumeUint32(b)
427 b, d.h[6] = consumeUint32(b)
428 b, d.h[7] = consumeUint32(b)
429 b = b[copy(d.x[:], b):]
430 b, d.len = consumeUint64(b)
431 d.nx = int(d.len % chunk)
432 return nil
433}
434
435func appendUint32(b []byte, v uint32) []byte {
436 return append(b,
437 byte(v>>24),
438 byte(v>>16),
439 byte(v>>8),
440 byte(v),
441 )
442}
443
444func appendUint64(b []byte, v uint64) []byte {
445 return append(b,
446 byte(v>>56),
447 byte(v>>48),
448 byte(v>>40),
449 byte(v>>32),
450 byte(v>>24),
451 byte(v>>16),
452 byte(v>>8),
453 byte(v),
454 )
455}
456
457func consumeUint64(b []byte) ([]byte, uint64) {
458 _ = b[7]
459 x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
460 uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
461 return b[8:], x
462}
463
464func consumeUint32(b []byte) ([]byte, uint32) {
465 _ = b[3]
466 x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
467 return b[4:], x
468}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm
new file mode 100644
index 0000000..c959b1a
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm
@@ -0,0 +1,686 @@
1
2// 16x Parallel implementation of SHA256 for AVX512
3
4//
5// Minio Cloud Storage, (C) 2017 Minio, Inc.
6//
7// Licensed under the Apache License, Version 2.0 (the "License");
8// you may not use this file except in compliance with the License.
9// You may obtain a copy of the License at
10//
11// http://www.apache.org/licenses/LICENSE-2.0
12//
13// Unless required by applicable law or agreed to in writing, software
14// distributed under the License is distributed on an "AS IS" BASIS,
15// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16// See the License for the specific language governing permissions and
17// limitations under the License.
18
19//
20// This code is based on the Intel Multi-Buffer Crypto for IPSec library
21// and more specifically the following implementation:
22// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm
23//
24// For Golang it has been converted into Plan 9 assembly with the help of
25// github.com/minio/asm2plan9s to assemble the AVX512 instructions
26//
27
28// Copyright (c) 2017, Intel Corporation
29//
30// Redistribution and use in source and binary forms, with or without
31// modification, are permitted provided that the following conditions are met:
32//
33// * Redistributions of source code must retain the above copyright notice,
34// this list of conditions and the following disclaimer.
35// * Redistributions in binary form must reproduce the above copyright
36// notice, this list of conditions and the following disclaimer in the
37// documentation and/or other materials provided with the distribution.
38// * Neither the name of Intel Corporation nor the names of its contributors
39// may be used to endorse or promote products derived from this software
40// without specific prior written permission.
41//
42// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
43// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
45// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
46// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
48// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
49// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
50// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52
53#define SHA256_DIGEST_ROW_SIZE 64
54
55// arg1
56#define STATE rdi
57#define STATE_P9 DI
58// arg2
59#define INP_SIZE rsi
60#define INP_SIZE_P9 SI
61
62#define IDX rcx
63#define TBL rdx
64#define TBL_P9 DX
65
66#define INPUT rax
67#define INPUT_P9 AX
68
69#define inp0 r9
70#define SCRATCH_P9 R12
71#define SCRATCH r12
72#define maskp r13
73#define MASKP_P9 R13
74#define mask r14
75#define MASK_P9 R14
76
77#define A zmm0
78#define B zmm1
79#define C zmm2
80#define D zmm3
81#define E zmm4
82#define F zmm5
83#define G zmm6
84#define H zmm7
85#define T1 zmm8
86#define TMP0 zmm9
87#define TMP1 zmm10
88#define TMP2 zmm11
89#define TMP3 zmm12
90#define TMP4 zmm13
91#define TMP5 zmm14
92#define TMP6 zmm15
93
94#define W0 zmm16
95#define W1 zmm17
96#define W2 zmm18
97#define W3 zmm19
98#define W4 zmm20
99#define W5 zmm21
100#define W6 zmm22
101#define W7 zmm23
102#define W8 zmm24
103#define W9 zmm25
104#define W10 zmm26
105#define W11 zmm27
106#define W12 zmm28
107#define W13 zmm29
108#define W14 zmm30
109#define W15 zmm31
110
111
112#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \
113 \
114 \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0}
115 \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0}
116 \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0}
117 \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0}
118 \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0}
119 \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0}
120 \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0}
121 \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0}
122 \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0}
123 \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0}
124 \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0}
125 \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0}
126 \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0}
127 \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0}
128 \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0}
129 \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0}
130 \
131 \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
132 \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
133 \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
134 \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
135 \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
136 \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
137 \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
138 \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
139 \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
140 \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
141 \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
142 \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
143 \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
144 \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
145 \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
146 \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
147 \
148 \ // process top half
149 vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0}
150 vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2}
151 vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0}
152 vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2}
153 \
154 vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1}
155 vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2}
156 vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3}
157 vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0}
158 \
159 \ // use r2 in place of t0
160 vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0}
161 vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2}
162 vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0}
163 vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2}
164 \
165 vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1}
166 vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2}
167 vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3}
168 vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0}
169 \
170 \ // use r6 in place of t0
171 vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0}
172 vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2}
173 vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0}
174 vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2}
175 \
176 vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 i13 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1}
177 vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 i14 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2}
178 vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 i15 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3}
179 vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 i12 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0}
180 \
181 \ // use r10 in place of t0
182 vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 m1 m0}
183 vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 m3 m2}
184 vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 o13 o12 p9 p8 o9 o8 p5 p4 o5 o4 p1 p0 o1 o0}
185 vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 o15 o14 p11 p10 o11 o10 p7 p6 o7 o6 p3 p2 o3 o2}
186 \
187 vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 o13 n13 m13 p9 o9 n9 m9 p5 o5 n5 m5 p1 o1 n1 m1}
188 vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 o14 n14 m14 p10 o10 n10 m10 p6 o6 n6 m6 p2 o2 n2 m2}
189 vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 o15 n15 m15 p11 o11 n11 m11 p7 o7 n7 m7 p3 o3 n3 m3}
190 vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 o12 n12 m12 p8 o8 n8 m8 p4 o4 n4 m4 p0 o0 n0 m0}
191 \
192 \ // At this point, the registers that contain interesting data are:
193 \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12
194 \ // Can use t1 and r14 as scratch registers
195 LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \
196 LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \
197 \
198 vmovdqu32 _r14, [rbx] \
199 vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0}
200 vmovdqu32 _t1, [r8] \
201 vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4}
202 \
203 vmovdqu32 _r2, [rbx] \
204 vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1}
205 vmovdqu32 _t0, [r8] \
206 vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5}
207 \
208 vmovdqu32 _r3, [rbx] \
209 vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2}
210 vmovdqu32 _r7, [r8] \
211 vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6}
212 \
213 vmovdqu32 _r1, [rbx] \
214 vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3}
215 vmovdqu32 _r5, [r8] \
216 vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7}
217 \
218 vmovdqu32 _r0, [rbx] \
219 vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0}
220 vmovdqu32 _r4, [r8] \
221 vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4}
222 \
223 vmovdqu32 _r6, [rbx] \
224 vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1}
225 vmovdqu32 _r10, [r8] \
226 vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5}
227 \
228 vmovdqu32 _r11, [rbx] \
229 vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2}
230 vmovdqu32 _r15, [r8] \
231 vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6}
232 \
233 vmovdqu32 _r9, [rbx] \
234 vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3}
235 vmovdqu32 _r13, [r8] \
236 vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7}
237 \
238 \ // At this point r8 and r12 can be used as scratch registers
239 vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
240 vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
241 \
242 vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
243 vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
244 \
245 vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
246 vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
247 \
248 vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
249 vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
250 \
251 vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
252 vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
253 \
254 vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
255 vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
256 \
257 vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
258 vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
259 \
260 vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
261 vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
262 \
263 vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
264 vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
265
266
267// CH(A, B, C) = (A&B) ^ (~A&C)
268// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
269// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22
270// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25
271// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3
272// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10
273
274// Main processing loop per round
275#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \
276 \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
277 \ // T2 = SIGMA0(A) + MAJ(A, B, C)
278 \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
279 \
280 \ // H becomes T2, then add T1 for A
281 \ // D becomes D + T1 for E
282 \
283 vpaddd T1, _H, TMP3 \ // T1 = H + Kt
284 vmovdqu32 TMP0, _E \
285 vprord TMP1, _E, 6 \ // ROR_6(E)
286 vprord TMP2, _E, 11 \ // ROR_11(E)
287 vprord TMP3, _E, 25 \ // ROR_25(E)
288 vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G)
289 vpaddd T1, T1, _WT \ // T1 = T1 + Wt
290 vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E)
291 vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G)
292 vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E)
293 vpaddd _D, _D, T1 \ // D = D + T1
294 \
295 vprord _H, _A, 2 \ // ROR_2(A)
296 vprord TMP2, _A, 13 \ // ROR_13(A)
297 vprord TMP3, _A, 22 \ // ROR_22(A)
298 vmovdqu32 TMP0, _A \
299 vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C)
300 vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A)
301 vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C)
302 vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1
303 \
304 vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt
305
306
307#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \
308 vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2)
309 vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2)
310 vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2)
311 vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2)
312 \
313 vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2)
314 vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7
315 \
316 vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15)
317 vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15)
318 vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15)
319 vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15)
320 \
321 vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) +
322 \ // Wt-7 + sigma0(Wt-15) +
323
324
325// Note this is reading in a block of data for one lane
326// When all 16 are read, the data must be transposed to build msg schedule
327#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \
328 TESTQ $(1<<OFFSET), MASK_P9 \
329 JE LABEL \
330 MOVQ OFFSET*24(INPUT_P9), R9 \
331 vmovups _WT, [inp0+IDX] \
332LABEL: \
333
334#define MASKED_LOAD(_WT, OFFSET, LABEL) \
335 TESTQ $(1<<OFFSET), MASK_P9 \
336 JE LABEL \
337 MOVQ OFFSET*24(INPUT_P9), R9 \
338 vmovups _WT,[inp0+IDX] \
339LABEL: \
340
341TEXT ·sha256_x16_avx512(SB), 7, $0
342 MOVQ digests+0(FP), STATE_P9 //
343 MOVQ scratch+8(FP), SCRATCH_P9
344 MOVQ mask_len+32(FP), INP_SIZE_P9 // number of blocks to process
345 MOVQ mask+24(FP), MASKP_P9
346 MOVQ (MASKP_P9), MASK_P9
347 kmovq k1, mask
348 LEAQ inputs+48(FP), INPUT_P9
349
350 // Initialize digests
351 vmovdqu32 A, [STATE + 0*SHA256_DIGEST_ROW_SIZE]
352 vmovdqu32 B, [STATE + 1*SHA256_DIGEST_ROW_SIZE]
353 vmovdqu32 C, [STATE + 2*SHA256_DIGEST_ROW_SIZE]
354 vmovdqu32 D, [STATE + 3*SHA256_DIGEST_ROW_SIZE]
355 vmovdqu32 E, [STATE + 4*SHA256_DIGEST_ROW_SIZE]
356 vmovdqu32 F, [STATE + 5*SHA256_DIGEST_ROW_SIZE]
357 vmovdqu32 G, [STATE + 6*SHA256_DIGEST_ROW_SIZE]
358 vmovdqu32 H, [STATE + 7*SHA256_DIGEST_ROW_SIZE]
359
360 MOVQ table+16(FP), TBL_P9
361
362 xor IDX, IDX
363
364 // Read in first block of input data
365 MASKED_LOAD( W0, 0, skipInput0)
366 MASKED_LOAD( W1, 1, skipInput1)
367 MASKED_LOAD( W2, 2, skipInput2)
368 MASKED_LOAD( W3, 3, skipInput3)
369 MASKED_LOAD( W4, 4, skipInput4)
370 MASKED_LOAD( W5, 5, skipInput5)
371 MASKED_LOAD( W6, 6, skipInput6)
372 MASKED_LOAD( W7, 7, skipInput7)
373 MASKED_LOAD( W8, 8, skipInput8)
374 MASKED_LOAD( W9, 9, skipInput9)
375 MASKED_LOAD(W10, 10, skipInput10)
376 MASKED_LOAD(W11, 11, skipInput11)
377 MASKED_LOAD(W12, 12, skipInput12)
378 MASKED_LOAD(W13, 13, skipInput13)
379 MASKED_LOAD(W14, 14, skipInput14)
380 MASKED_LOAD(W15, 15, skipInput15)
381
382lloop:
383 LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), TBL_P9
384 vmovdqu32 TMP2, [TBL]
385
386 // Get first K from table
387 MOVQ table+16(FP), TBL_P9
388 vmovdqu32 TMP3, [TBL]
389
390 // Save digests for later addition
391 vmovdqu32 [SCRATCH + 64*0], A
392 vmovdqu32 [SCRATCH + 64*1], B
393 vmovdqu32 [SCRATCH + 64*2], C
394 vmovdqu32 [SCRATCH + 64*3], D
395 vmovdqu32 [SCRATCH + 64*4], E
396 vmovdqu32 [SCRATCH + 64*5], F
397 vmovdqu32 [SCRATCH + 64*6], G
398 vmovdqu32 [SCRATCH + 64*7], H
399
400 add IDX, 64
401
402 // Transpose input data
403 TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1)
404
405 vpshufb W0, W0, TMP2
406 vpshufb W1, W1, TMP2
407 vpshufb W2, W2, TMP2
408 vpshufb W3, W3, TMP2
409 vpshufb W4, W4, TMP2
410 vpshufb W5, W5, TMP2
411 vpshufb W6, W6, TMP2
412 vpshufb W7, W7, TMP2
413 vpshufb W8, W8, TMP2
414 vpshufb W9, W9, TMP2
415 vpshufb W10, W10, TMP2
416 vpshufb W11, W11, TMP2
417 vpshufb W12, W12, TMP2
418 vpshufb W13, W13, TMP2
419 vpshufb W14, W14, TMP2
420 vpshufb W15, W15, TMP2
421
422 // MSG Schedule for W0-W15 is now complete in registers
423 // Process first 48 rounds
424 // Calculate next Wt+16 after processing is complete and Wt is unneeded
425
426 PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H)
427 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
428 PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G)
429 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
430 PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F)
431 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
432 PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E)
433 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
434 PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D)
435 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
436 PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C)
437 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
438 PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B)
439 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
440 PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A)
441 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
442 PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H)
443 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
444 PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G)
445 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
446 PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F)
447 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
448 PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E)
449 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
450 PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D)
451 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
452 PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C)
453 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
454 PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B)
455 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
456 PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A)
457 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
458 PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H)
459 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
460 PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G)
461 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
462 PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F)
463 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
464 PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E)
465 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
466 PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D)
467 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
468 PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C)
469 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
470 PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B)
471 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
472 PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A)
473 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
474 PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H)
475 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
476 PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G)
477 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
478 PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F)
479 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
480 PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E)
481 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
482 PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D)
483 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
484 PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C)
485 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
486 PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B)
487 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
488 PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A)
489 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
490 PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H)
491 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
492 PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G)
493 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
494 PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F)
495 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
496 PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E)
497 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
498 PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D)
499 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
500 PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C)
501 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
502 PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B)
503 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
504 PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A)
505 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
506 PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H)
507 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
508 PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G)
509 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
510 PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F)
511 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
512 PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E)
513 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
514 PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D)
515 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
516 PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C)
517 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
518 PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B)
519 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
520 PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A)
521 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
522
523 // Check if this is the last block
524 sub INP_SIZE, 1
525 JE lastLoop
526
527 // Load next mask for inputs
528 ADDQ $8, MASKP_P9
529 MOVQ (MASKP_P9), MASK_P9
530
531 // Process last 16 rounds
532 // Read in next block msg data for use in first 16 words of msg sched
533
534 PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
535 MSG_SCHED_ROUND_00_15( W0, 0, skipNext0)
536 PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
537 MSG_SCHED_ROUND_00_15( W1, 1, skipNext1)
538 PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
539 MSG_SCHED_ROUND_00_15( W2, 2, skipNext2)
540 PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
541 MSG_SCHED_ROUND_00_15( W3, 3, skipNext3)
542 PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
543 MSG_SCHED_ROUND_00_15( W4, 4, skipNext4)
544 PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
545 MSG_SCHED_ROUND_00_15( W5, 5, skipNext5)
546 PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
547 MSG_SCHED_ROUND_00_15( W6, 6, skipNext6)
548 PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
549 MSG_SCHED_ROUND_00_15( W7, 7, skipNext7)
550 PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
551 MSG_SCHED_ROUND_00_15( W8, 8, skipNext8)
552 PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
553 MSG_SCHED_ROUND_00_15( W9, 9, skipNext9)
554 PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
555 MSG_SCHED_ROUND_00_15(W10, 10, skipNext10)
556 PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
557 MSG_SCHED_ROUND_00_15(W11, 11, skipNext11)
558 PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
559 MSG_SCHED_ROUND_00_15(W12, 12, skipNext12)
560 PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
561 MSG_SCHED_ROUND_00_15(W13, 13, skipNext13)
562 PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
563 MSG_SCHED_ROUND_00_15(W14, 14, skipNext14)
564 PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
565 MSG_SCHED_ROUND_00_15(W15, 15, skipNext15)
566
567 // Add old digest
568 vmovdqu32 TMP2, A
569 vmovdqu32 A, [SCRATCH + 64*0]
570 vpaddd A{k1}, A, TMP2
571 vmovdqu32 TMP2, B
572 vmovdqu32 B, [SCRATCH + 64*1]
573 vpaddd B{k1}, B, TMP2
574 vmovdqu32 TMP2, C
575 vmovdqu32 C, [SCRATCH + 64*2]
576 vpaddd C{k1}, C, TMP2
577 vmovdqu32 TMP2, D
578 vmovdqu32 D, [SCRATCH + 64*3]
579 vpaddd D{k1}, D, TMP2
580 vmovdqu32 TMP2, E
581 vmovdqu32 E, [SCRATCH + 64*4]
582 vpaddd E{k1}, E, TMP2
583 vmovdqu32 TMP2, F
584 vmovdqu32 F, [SCRATCH + 64*5]
585 vpaddd F{k1}, F, TMP2
586 vmovdqu32 TMP2, G
587 vmovdqu32 G, [SCRATCH + 64*6]
588 vpaddd G{k1}, G, TMP2
589 vmovdqu32 TMP2, H
590 vmovdqu32 H, [SCRATCH + 64*7]
591 vpaddd H{k1}, H, TMP2
592
593 kmovq k1, mask
594 JMP lloop
595
596lastLoop:
597 // Process last 16 rounds
598 PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
599 PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
600 PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
601 PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
602 PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
603 PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
604 PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
605 PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
606 PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
607 PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
608 PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
609 PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
610 PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
611 PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
612 PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
613 PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
614
615 // Add old digest
616 vmovdqu32 TMP2, A
617 vmovdqu32 A, [SCRATCH + 64*0]
618 vpaddd A{k1}, A, TMP2
619 vmovdqu32 TMP2, B
620 vmovdqu32 B, [SCRATCH + 64*1]
621 vpaddd B{k1}, B, TMP2
622 vmovdqu32 TMP2, C
623 vmovdqu32 C, [SCRATCH + 64*2]
624 vpaddd C{k1}, C, TMP2
625 vmovdqu32 TMP2, D
626 vmovdqu32 D, [SCRATCH + 64*3]
627 vpaddd D{k1}, D, TMP2
628 vmovdqu32 TMP2, E
629 vmovdqu32 E, [SCRATCH + 64*4]
630 vpaddd E{k1}, E, TMP2
631 vmovdqu32 TMP2, F
632 vmovdqu32 F, [SCRATCH + 64*5]
633 vpaddd F{k1}, F, TMP2
634 vmovdqu32 TMP2, G
635 vmovdqu32 G, [SCRATCH + 64*6]
636 vpaddd G{k1}, G, TMP2
637 vmovdqu32 TMP2, H
638 vmovdqu32 H, [SCRATCH + 64*7]
639 vpaddd H{k1}, H, TMP2
640
641 // Write out digest
642 vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A
643 vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B
644 vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C
645 vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D
646 vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E
647 vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F
648 vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G
649 vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H
650
651 VZEROUPPER
652 RET
653
654//
655// Tables
656//
657
658DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
659DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
660DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
661DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
662DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
663DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
664DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
665DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
666GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64
667
668DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
669DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
670DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
671DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
672DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
673DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
674DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
675DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
676GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64
677
678DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
679DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
680DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
681DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
682DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
683DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
684DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
685DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
686GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
new file mode 100644
index 0000000..4b9473a
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
@@ -0,0 +1,501 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2017 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22import (
23 "encoding/binary"
24 "errors"
25 "hash"
26 "sort"
27 "sync/atomic"
28 "time"
29)
30
31//go:noescape
32func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte)
33
34// Avx512ServerUID - Do not start at 0 but at the next multiple of 16, so as to be able to
35// differentiate from the default initialization value of 0
36const Avx512ServerUID = 16
37
38var uidCounter uint64
39
40// NewAvx512 - initialize sha256 Avx512 implementation.
41func NewAvx512(a512srv *Avx512Server) hash.Hash {
42 uid := atomic.AddUint64(&uidCounter, 1)
43 return &Avx512Digest{uid: uid, a512srv: a512srv}
44}
45
46// Avx512Digest - Type for computing SHA256 using Avx512
47type Avx512Digest struct {
48 uid uint64
49 a512srv *Avx512Server
50 x [chunk]byte
51 nx int
52 len uint64
53 final bool
54 result [Size]byte
55}
56
57// Size - Return size of checksum
58func (d *Avx512Digest) Size() int { return Size }
59
60// BlockSize - Return blocksize of checksum
61func (d Avx512Digest) BlockSize() int { return BlockSize }
62
63// Reset - reset sha digest to its initial values
64func (d *Avx512Digest) Reset() {
65 d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true}
66 d.nx = 0
67 d.len = 0
68 d.final = false
69}
70
71// Write to digest
72func (d *Avx512Digest) Write(p []byte) (nn int, err error) {
73
74 if d.final {
75 return 0, errors.New("Avx512Digest already finalized. Reset first before writing again")
76 }
77
78 nn = len(p)
79 d.len += uint64(nn)
80 if d.nx > 0 {
81 n := copy(d.x[d.nx:], p)
82 d.nx += n
83 if d.nx == chunk {
84 d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]}
85 d.nx = 0
86 }
87 p = p[n:]
88 }
89 if len(p) >= chunk {
90 n := len(p) &^ (chunk - 1)
91 d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]}
92 p = p[n:]
93 }
94 if len(p) > 0 {
95 d.nx = copy(d.x[:], p)
96 }
97 return
98}
99
100// Sum - Return sha256 sum in bytes
101func (d *Avx512Digest) Sum(in []byte) (result []byte) {
102
103 if d.final {
104 return append(in, d.result[:]...)
105 }
106
107 trail := make([]byte, 0, 128)
108 trail = append(trail, d.x[:d.nx]...)
109
110 len := d.len
111 // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
112 var tmp [64]byte
113 tmp[0] = 0x80
114 if len%64 < 56 {
115 trail = append(trail, tmp[0:56-len%64]...)
116 } else {
117 trail = append(trail, tmp[0:64+56-len%64]...)
118 }
119 d.nx = 0
120
121 // Length in bits.
122 len <<= 3
123 for i := uint(0); i < 8; i++ {
124 tmp[i] = byte(len >> (56 - 8*i))
125 }
126 trail = append(trail, tmp[0:8]...)
127
128 sumCh := make(chan [Size]byte)
129 d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh}
130 d.result = <-sumCh
131 d.final = true
132 return append(in, d.result[:]...)
133}
134
135var table = [512]uint64{
136 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
137 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
138 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
139 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
140 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
141 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
142 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
143 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
144 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
145 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
146 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
147 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
148 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
149 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
150 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
151 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
152 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
153 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
154 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
155 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
156 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
157 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
158 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
159 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
160 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
161 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
162 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
163 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
164 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
165 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
166 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
167 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
168 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
169 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
170 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
171 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
172 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
173 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
174 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
175 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
176 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
177 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
178 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
179 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
180 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
181 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
182 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
183 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
184 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
185 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
186 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
187 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
188 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
189 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
190 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
191 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
192 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
193 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
194 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
195 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
196 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
197 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
198 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
199 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
200 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
201 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
202 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
203 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
204 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
205 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
206 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
207 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
208 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
209 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
210 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
211 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
212 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
213 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
214 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
215 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
216 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
217 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
218 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
219 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
220 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
221 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
222 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
223 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
224 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
225 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
226 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
227 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
228 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
229 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
230 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
231 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
232 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
233 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
234 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
235 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
236 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
237 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
238 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
239 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
240 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
241 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
242 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
243 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
244 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
245 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
246 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
247 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
248 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
249 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
250 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
251 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
252 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
253 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
254 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
255 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
256 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
257 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
258 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
259 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
260 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
261 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
262 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2,
263 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2}
264
265// Interface function to assembly code
266func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte {
267
268 scratch := [512]byte{}
269 sha256X16Avx512(digests, &scratch, &table, mask, input)
270
271 output := [16][Size]byte{}
272 for i := 0; i < 16; i++ {
273 output[i] = getDigest(i, digests[:])
274 }
275
276 return output
277}
278
279func getDigest(index int, state []byte) (sum [Size]byte) {
280 for j := 0; j < 16; j += 2 {
281 for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size {
282 binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4]))
283 }
284 }
285 return
286}
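// Layout note (editorial sketch, not part of the upstream file): the 512-byte
// state is plane-interleaved across the sixteen lanes: 32-bit word w of lane i
// lives at byte offset (i + w*16)*4, so all lanes' 'a' words are contiguous,
// then all 'b' words, and so on. getDigest above gathers the eight words of
// one lane back into a conventional big-endian 32-byte digest; getDigests
// further down performs the matching scatter when seeding the state.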
287
288// Message to send across input channel
289type blockInput struct {
290 uid uint64
291 msg []byte
292 reset bool
293 final bool
294 sumCh chan [Size]byte
295}
296
297// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations
298type Avx512Server struct {
299 blocksCh chan blockInput // Input channel
300 totalIn int // Total number of inputs waiting to be processed
301 lanes [16]Avx512LaneInfo // Array with info per lane (out of 16)
302 digests map[uint64][Size]byte // Map of uids to (interim) digest results
303}
304
305// Avx512LaneInfo - Info for each lane
306type Avx512LaneInfo struct {
307 uid uint64 // unique identification for this SHA processing
308 block []byte // input block to be processed
309 outputCh chan [Size]byte // channel for output result
310}
311
312// NewAvx512Server - Create a new server object for parallel SHA256 processing
313func NewAvx512Server() *Avx512Server {
314 a512srv := &Avx512Server{}
315 a512srv.digests = make(map[uint64][Size]byte)
316 a512srv.blocksCh = make(chan blockInput)
317
318 // Start a single thread for reading from the input channel
319 go a512srv.Process()
320 return a512srv
321}
322
323// Process - Sole handler for reading from the input channel
324func (a512srv *Avx512Server) Process() {
325 for {
326 select {
327 case block := <-a512srv.blocksCh:
328 if block.reset {
329 a512srv.reset(block.uid)
330 continue
331 }
332 index := block.uid & 0xf
333 // fmt.Println("Adding message:", block.uid, index)
334
335 if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs
336 //fmt.Println("Invoking Blocks()")
337 a512srv.blocks()
338 }
339 a512srv.totalIn++
340 a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg}
341 if block.final {
342 a512srv.lanes[index].outputCh = block.sumCh
343 }
344 if a512srv.totalIn == len(a512srv.lanes) {
345 // fmt.Println("Invoking Blocks() while FULL: ")
346 a512srv.blocks()
347 }
348
349 // TODO: test with larger timeout
350 case <-time.After(1 * time.Microsecond):
351 for _, lane := range a512srv.lanes {
352 if lane.block != nil { // check if there is any input to process
353 // fmt.Println("Invoking Blocks() on TIMEOUT: ")
354 a512srv.blocks()
355 break // we are done
356 }
357 }
358 }
359 }
360}
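// Editorial note (not part of the upstream file): Process multiplexes callers
// onto the 16 SIMD lanes via block.uid & 0xf, so two concurrent uids sharing
// the low four bits contend for the same lane. A pending batch is flushed
// through blocks() when a lane would otherwise be overwritten, when all 16
// lanes are filled, or when the 1µs timeout fires with work outstanding.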
361
362// Reset the state for the calculation identified by uid
363func (a512srv *Avx512Server) reset(uid uint64) {
364
365 // Check if there is a message still waiting to be processed (and remove if so)
366 for i, lane := range a512srv.lanes {
367 if lane.uid == uid {
368 if lane.block != nil {
369 a512srv.lanes[i] = Avx512LaneInfo{} // clear message
370 a512srv.totalIn--
371 }
372 }
373 }
374
375 // Delete entry from hash map
376 delete(a512srv.digests, uid)
377}
378
379// Invoke assembly and send results back
380func (a512srv *Avx512Server) blocks() {
381
382 inputs := [16][]byte{}
383 for i := range inputs {
384 inputs[i] = a512srv.lanes[i].block
385 }
386
387 mask := expandMask(genMask(inputs))
388 outputs := blockAvx512(a512srv.getDigests(), inputs, mask)
389
390 a512srv.totalIn = 0
391 for i := 0; i < len(outputs); i++ {
392 uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh
393 a512srv.digests[uid] = outputs[i]
394 a512srv.lanes[i] = Avx512LaneInfo{}
395
396 if outputCh != nil {
397 // Send back result
398 outputCh <- outputs[i]
399 delete(a512srv.digests, uid) // Delete entry from hashmap
400 }
401 }
402}
403
404func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) {
405 a512srv.blocksCh <- blockInput{uid: uid, msg: p}
406 return len(p), nil
407}
408
409// Sum - return the sha256 sum in bytes for a given uid, treating p as the final message block.
410func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte {
411 sumCh := make(chan [32]byte)
412 a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh}
413 return <-sumCh
414}
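// Example (editorial sketch, not part of the upstream file): driving the
// server directly with a hypothetical pre-padded message. Callers normally go
// through the package's hash.Hash wrapper instead, which takes care of SHA256
// padding; when using Write/Sum directly, every chunk handed over must be a
// whole number of 64-byte blocks.
//
//	srv := NewAvx512Server()
//	uid := uint64(1)                     // assumed caller-chosen stream id
//	_, _ = srv.Write(uid, padded[:64])   // padded is a hypothetical pre-padded message
//	digest := srv.Sum(uid, padded[64:])  // final block(s); blocks until the sum is ready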
415
416func (a512srv *Avx512Server) getDigests() *[512]byte {
417 digests := [512]byte{}
418 for i, lane := range a512srv.lanes {
419 a, ok := a512srv.digests[lane.uid]
420 if ok {
421 binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4]))
422 binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8]))
423 binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12]))
424 binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16]))
425 binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20]))
426 binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24]))
427 binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28]))
428 binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32]))
429 } else {
430 binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0)
431 binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1)
432 binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2)
433 binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3)
434 binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4)
435 binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5)
436 binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6)
437 binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7)
438 }
439 }
440 return &digests
441}
442
443// Helper struct for sorting blocks based on length
444type lane struct {
445 len uint
446 pos uint
447}
448
449type lanes []lane
450
451func (lns lanes) Len() int { return len(lns) }
452func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] }
453func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len }
454
455// Helper struct for pairing a lane mask with the number of 64-byte rounds it covers
456type maskRounds struct {
457 mask uint64
458 rounds uint64
459}
460
461func genMask(input [16][]byte) [16]maskRounds {
462
463 // Sort on blocks length small to large
464 var sorted [16]lane
465 for c, inpt := range input {
466 sorted[c] = lane{uint(len(inpt)), uint(c)}
467 }
468 sort.Sort(lanes(sorted[:]))
469
470 // Create mask array including 'rounds' between masks
471 m, round, index := uint64(0xffff), uint64(0), 0
472 var mr [16]maskRounds
473 for _, s := range sorted {
474 if s.len > 0 {
475 if uint64(s.len)>>6 > round {
476 mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round}
477 index++
478 }
479 round = uint64(s.len) >> 6
480 }
481 m = m & ^(1 << uint(s.pos))
482 }
483
484 return mr
485}
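// Worked example (editorial, not part of the upstream file): if lanes 0 and 1
// each hold 64 bytes and lane 2 holds 128 bytes, with the remaining thirteen
// lanes empty, the empty lanes' bits are cleared first and genMask emits
//
//	mr[0] = maskRounds{mask: 0x0007, rounds: 1} // lanes 0-2: first 64-byte block
//	mr[1] = maskRounds{mask: 0x0004, rounds: 1} // lane 2 only: second block
//
// The assembly then runs each mask for the stated number of rounds, idling
// the masked-off lanes.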
486
487// TODO: remove function
488func expandMask(mr [16]maskRounds) []uint64 {
489 size := uint64(0)
490 for _, r := range mr {
491 size += r.rounds
492 }
493 result, index := make([]uint64, size), 0
494 for _, r := range mr {
495 for j := uint64(0); j < r.rounds; j++ {
496 result[index] = r.mask
497 index++
498 }
499 }
500 return result
501}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
new file mode 100644
index 0000000..cca534e
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
@@ -0,0 +1,267 @@
1//+build !noasm,!appengine,gc
2
3TEXT ·sha256X16Avx512(SB), 7, $0
4 MOVQ digests+0(FP), DI
5 MOVQ scratch+8(FP), R12
6 MOVQ mask_len+32(FP), SI
7 MOVQ mask_base+24(FP), R13
8 MOVQ (R13), R14
9 LONG $0x92fbc1c4; BYTE $0xce
10 LEAQ inputs+48(FP), AX
11 QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07
12 MOVQ table+16(FP), DX
13 WORD $0x3148; BYTE $0xc9
14 TESTQ $(1<<0), R14
15 JE skipInput0
16 MOVQ 0*24(AX), R9
17 LONG $0x487cc162; WORD $0x0410; BYTE $0x09
18
19skipInput0:
20 TESTQ $(1<<1), R14
21 JE skipInput1
22 MOVQ 1*24(AX), R9
23 LONG $0x487cc162; WORD $0x0c10; BYTE $0x09
24
25skipInput1:
26 TESTQ $(1<<2), R14
27 JE skipInput2
28 MOVQ 2*24(AX), R9
29 LONG $0x487cc162; WORD $0x1410; BYTE $0x09
30
31skipInput2:
32 TESTQ $(1<<3), R14
33 JE skipInput3
34 MOVQ 3*24(AX), R9
35 LONG $0x487cc162; WORD $0x1c10; BYTE $0x09
36
37skipInput3:
38 TESTQ $(1<<4), R14
39 JE skipInput4
40 MOVQ 4*24(AX), R9
41 LONG $0x487cc162; WORD $0x2410; BYTE $0x09
42
43skipInput4:
44 TESTQ $(1<<5), R14
45 JE skipInput5
46 MOVQ 5*24(AX), R9
47 LONG $0x487cc162; WORD $0x2c10; BYTE $0x09
48
49skipInput5:
50 TESTQ $(1<<6), R14
51 JE skipInput6
52 MOVQ 6*24(AX), R9
53 LONG $0x487cc162; WORD $0x3410; BYTE $0x09
54
55skipInput6:
56 TESTQ $(1<<7), R14
57 JE skipInput7
58 MOVQ 7*24(AX), R9
59 LONG $0x487cc162; WORD $0x3c10; BYTE $0x09
60
61skipInput7:
62 TESTQ $(1<<8), R14
63 JE skipInput8
64 MOVQ 8*24(AX), R9
65 LONG $0x487c4162; WORD $0x0410; BYTE $0x09
66
67skipInput8:
68 TESTQ $(1<<9), R14
69 JE skipInput9
70 MOVQ 9*24(AX), R9
71 LONG $0x487c4162; WORD $0x0c10; BYTE $0x09
72
73skipInput9:
74 TESTQ $(1<<10), R14
75 JE skipInput10
76 MOVQ 10*24(AX), R9
77 LONG $0x487c4162; WORD $0x1410; BYTE $0x09
78
79skipInput10:
80 TESTQ $(1<<11), R14
81 JE skipInput11
82 MOVQ 11*24(AX), R9
83 LONG $0x487c4162; WORD $0x1c10; BYTE $0x09
84
85skipInput11:
86 TESTQ $(1<<12), R14
87 JE skipInput12
88 MOVQ 12*24(AX), R9
89 LONG $0x487c4162; WORD $0x2410; BYTE $0x09
90
91skipInput12:
92 TESTQ $(1<<13), R14
93 JE skipInput13
94 MOVQ 13*24(AX), R9
95 LONG $0x487c4162; WORD $0x2c10; BYTE $0x09
96
97skipInput13:
98 TESTQ $(1<<14), R14
99 JE skipInput14
100 MOVQ 14*24(AX), R9
101 LONG $0x487c4162; WORD $0x3410; BYTE $0x09
102
103skipInput14:
104 TESTQ $(1<<15), R14
105 JE skipInput15
106 MOVQ 15*24(AX), R9
107 LONG $0x487c4162; WORD $0x3c10; BYTE $0x09
108
109skipInput15:
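// Editorial note (not part of the upstream file): each TESTQ/JE pair above
// checks one bit of the lane mask in R14; when set, the lane's base pointer is
// loaded from its 24-byte slice header (0*24(AX), 1*24(AX), ...) and the
// hand-encoded LONG/WORD/BYTE sequence (an EVEX load, which the Go assembler
// of the time could not emit symbolically) appears to pull the lane's 64-byte
// block into its zmm register.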
110lloop:
111 LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX
112 LONG $0x487e7162; WORD $0x1a6f
113 MOVQ table+16(FP), DX
114 QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88
115 LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX
116 LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8
117 QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD 
$0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD 
$0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD 
$0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD 
$0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD 
$0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD 
$0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD 
$0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD 
$0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD 
$0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005
118 JE lastLoop
119 ADDQ $8, R13
120 MOVQ (R13), R14
121 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31
122 TESTQ $(1<<0), R14
123 JE skipNext0
124 MOVQ 0*24(AX), R9
125 LONG $0x487cc162; WORD $0x0410; BYTE $0x09
126
127skipNext0:
128 QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32
129 TESTQ $(1<<1), R14
130 JE skipNext1
131 MOVQ 1*24(AX), R9
132 LONG $0x487cc162; WORD $0x0c10; BYTE $0x09
133
134skipNext1:
135 QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33
136 TESTQ $(1<<2), R14
137 JE skipNext2
138 MOVQ 2*24(AX), R9
139 LONG $0x487cc162; WORD $0x1410; BYTE $0x09
140
141skipNext2:
142 QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34
143 TESTQ $(1<<3), R14
144 JE skipNext3
145 MOVQ 3*24(AX), R9
146 LONG $0x487cc162; WORD $0x1c10; BYTE $0x09
147
148skipNext3:
149 QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35
150 TESTQ $(1<<4), R14
151 JE skipNext4
152 MOVQ 4*24(AX), R9
153 LONG $0x487cc162; WORD $0x2410; BYTE $0x09
154
155skipNext4:
156 QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36
157 TESTQ $(1<<5), R14
158 JE skipNext5
159 MOVQ 5*24(AX), R9
160 LONG $0x487cc162; WORD $0x2c10; BYTE $0x09
161
162skipNext5:
163 QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37
164 TESTQ $(1<<6), R14
165 JE skipNext6
166 MOVQ 6*24(AX), R9
167 LONG $0x487cc162; WORD $0x3410; BYTE $0x09
168
169skipNext6:
170 QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38
171 TESTQ $(1<<7), R14
172 JE skipNext7
173 MOVQ 7*24(AX), R9
174 LONG $0x487cc162; WORD $0x3c10; BYTE $0x09
175
176skipNext7:
177 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39
178 TESTQ $(1<<8), R14
179 JE skipNext8
180 MOVQ 8*24(AX), R9
181 LONG $0x487c4162; WORD $0x0410; BYTE $0x09
182
183skipNext8:
184 QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a
185 TESTQ $(1<<9), R14
186 JE skipNext9
187 MOVQ 9*24(AX), R9
188 LONG $0x487c4162; WORD $0x0c10; BYTE $0x09
189
190skipNext9:
191 QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b
192 TESTQ $(1<<10), R14
193 JE skipNext10
194 MOVQ 10*24(AX), R9
195 LONG $0x487c4162; WORD $0x1410; BYTE $0x09
196
197skipNext10:
198 QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c
199 TESTQ $(1<<11), R14
200 JE skipNext11
201 MOVQ 11*24(AX), R9
202 LONG $0x487c4162; WORD $0x1c10; BYTE $0x09
203
204skipNext11:
205 QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d
206 TESTQ $(1<<12), R14
207 JE skipNext12
208 MOVQ 12*24(AX), R9
209 LONG $0x487c4162; WORD $0x2410; BYTE $0x09
210
211skipNext12:
212 QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e
213 TESTQ $(1<<13), R14
214 JE skipNext13
215 MOVQ 13*24(AX), R9
216 LONG $0x487c4162; WORD $0x2c10; BYTE $0x09
217
218skipNext13:
219 QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f
220 TESTQ $(1<<14), R14
221 JE skipNext14
222 MOVQ 14*24(AX), R9
223 LONG $0x487c4162; WORD $0x3410; BYTE $0x09
224
225skipNext14:
226 QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40
227 TESTQ $(1<<15), R14
228 JE skipNext15
229 MOVQ 15*24(AX), R9
230 LONG $0x487c4162; WORD $0x3c10; BYTE $0x09
231
232skipNext15:
233 QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1
234 JMP lloop
235
236lastLoop:
237 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f
238 VZEROUPPER
239 RET
240
241DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
242DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
243DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
244DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
245DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
246DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
247DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
248DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
249GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64
250DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
251DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
252DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
253DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
254DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
255DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
256DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
257DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
258GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64
259DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
260DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
261DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
262DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
263DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
264DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
265DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
266DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
267GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
new file mode 100644
index 0000000..e536f54
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
@@ -0,0 +1,31 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2016 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22func blockArmSha2Go(dig *digest, p []byte) {
23 panic("blockArmSha2Go called unexpectedly")
24}
25
26//go:noescape
27func blockIntelSha(h *[8]uint32, message []uint8)
28
29func blockIntelShaGo(dig *digest, p []byte) {
30 blockIntelSha(&dig.h, p)
31}
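These Go shims only get used when CPU feature detection elsewhere in the package selects the SHA extension path. A minimal sketch of that dispatch pattern; hasIntelSha, blockGeneric, and block are illustrative names here, not the package's own:

package sha256

// Sketch only: hasIntelSha would be set from CPUID feature flags
// (SHA, SSSE3, SSE4.1); blockGeneric stands in for the portable
// pure-Go compression function the package falls back to.
var hasIntelSha bool

func blockGeneric(dig *digest, p []byte) { /* pure-Go fallback */ }

// block feeds each run of 64-byte blocks to the fastest available path.
func block(dig *digest, p []byte) {
	if hasIntelSha {
		blockIntelShaGo(dig, p)
		return
	}
	blockGeneric(dig, p)
}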
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.s b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
new file mode 100644
index 0000000..c98a1d8
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
@@ -0,0 +1,266 @@
1// +build !noasm,!appengine,gc
2
3// SHA intrinsic version of SHA256
4
5// Kristofer Peterson, (C) 2018.
6//
7// Licensed under the Apache License, Version 2.0 (the "License");
8// you may not use this file except in compliance with the License.
9// You may obtain a copy of the License at
10//
11// http://www.apache.org/licenses/LICENSE-2.0
12//
13// Unless required by applicable law or agreed to in writing, software
14// distributed under the License is distributed on an "AS IS" BASIS,
15// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16// See the License for the specific language governing permissions and
17// limitations under the License.
18//
19
20#include "textflag.h"
21
22DATA K<>+0x00(SB)/4, $0x428a2f98
23DATA K<>+0x04(SB)/4, $0x71374491
24DATA K<>+0x08(SB)/4, $0xb5c0fbcf
25DATA K<>+0x0c(SB)/4, $0xe9b5dba5
26DATA K<>+0x10(SB)/4, $0x3956c25b
27DATA K<>+0x14(SB)/4, $0x59f111f1
28DATA K<>+0x18(SB)/4, $0x923f82a4
29DATA K<>+0x1c(SB)/4, $0xab1c5ed5
30DATA K<>+0x20(SB)/4, $0xd807aa98
31DATA K<>+0x24(SB)/4, $0x12835b01
32DATA K<>+0x28(SB)/4, $0x243185be
33DATA K<>+0x2c(SB)/4, $0x550c7dc3
34DATA K<>+0x30(SB)/4, $0x72be5d74
35DATA K<>+0x34(SB)/4, $0x80deb1fe
36DATA K<>+0x38(SB)/4, $0x9bdc06a7
37DATA K<>+0x3c(SB)/4, $0xc19bf174
38DATA K<>+0x40(SB)/4, $0xe49b69c1
39DATA K<>+0x44(SB)/4, $0xefbe4786
40DATA K<>+0x48(SB)/4, $0x0fc19dc6
41DATA K<>+0x4c(SB)/4, $0x240ca1cc
42DATA K<>+0x50(SB)/4, $0x2de92c6f
43DATA K<>+0x54(SB)/4, $0x4a7484aa
44DATA K<>+0x58(SB)/4, $0x5cb0a9dc
45DATA K<>+0x5c(SB)/4, $0x76f988da
46DATA K<>+0x60(SB)/4, $0x983e5152
47DATA K<>+0x64(SB)/4, $0xa831c66d
48DATA K<>+0x68(SB)/4, $0xb00327c8
49DATA K<>+0x6c(SB)/4, $0xbf597fc7
50DATA K<>+0x70(SB)/4, $0xc6e00bf3
51DATA K<>+0x74(SB)/4, $0xd5a79147
52DATA K<>+0x78(SB)/4, $0x06ca6351
53DATA K<>+0x7c(SB)/4, $0x14292967
54DATA K<>+0x80(SB)/4, $0x27b70a85
55DATA K<>+0x84(SB)/4, $0x2e1b2138
56DATA K<>+0x88(SB)/4, $0x4d2c6dfc
57DATA K<>+0x8c(SB)/4, $0x53380d13
58DATA K<>+0x90(SB)/4, $0x650a7354
59DATA K<>+0x94(SB)/4, $0x766a0abb
60DATA K<>+0x98(SB)/4, $0x81c2c92e
61DATA K<>+0x9c(SB)/4, $0x92722c85
62DATA K<>+0xa0(SB)/4, $0xa2bfe8a1
63DATA K<>+0xa4(SB)/4, $0xa81a664b
64DATA K<>+0xa8(SB)/4, $0xc24b8b70
65DATA K<>+0xac(SB)/4, $0xc76c51a3
66DATA K<>+0xb0(SB)/4, $0xd192e819
67DATA K<>+0xb4(SB)/4, $0xd6990624
68DATA K<>+0xb8(SB)/4, $0xf40e3585
69DATA K<>+0xbc(SB)/4, $0x106aa070
70DATA K<>+0xc0(SB)/4, $0x19a4c116
71DATA K<>+0xc4(SB)/4, $0x1e376c08
72DATA K<>+0xc8(SB)/4, $0x2748774c
73DATA K<>+0xcc(SB)/4, $0x34b0bcb5
74DATA K<>+0xd0(SB)/4, $0x391c0cb3
75DATA K<>+0xd4(SB)/4, $0x4ed8aa4a
76DATA K<>+0xd8(SB)/4, $0x5b9cca4f
77DATA K<>+0xdc(SB)/4, $0x682e6ff3
78DATA K<>+0xe0(SB)/4, $0x748f82ee
79DATA K<>+0xe4(SB)/4, $0x78a5636f
80DATA K<>+0xe8(SB)/4, $0x84c87814
81DATA K<>+0xec(SB)/4, $0x8cc70208
82DATA K<>+0xf0(SB)/4, $0x90befffa
83DATA K<>+0xf4(SB)/4, $0xa4506ceb
84DATA K<>+0xf8(SB)/4, $0xbef9a3f7
85DATA K<>+0xfc(SB)/4, $0xc67178f2
86GLOBL K<>(SB), RODATA|NOPTR, $256
87
88DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203
89DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b
90GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
91
92// Register Usage
93// BX base address of constant table (constant)
94// DX hash_state (constant)
95// SI hash_data.data
96// DI hash_data.data + hash_data.length - 64 (constant)
97// X0 scratch
98// X1 scratch
99// X2 working hash state // ABEF
100// X3 working hash state // CDGH
101// X4 first 16 bytes of block
102// X5 second 16 bytes of block
103// X6 third 16 bytes of block
104// X7 fourth 16 bytes of block
105// X12 saved hash state // ABEF
106// X13 saved hash state // CDGH
107// X15 data shuffle mask (constant)
108
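// The SHA256RNDS2 instruction keeps the state split across two
// registers as (ABEF) and (CDGH). The PUNPCKLLQ/PUNPCKHLQ/PSHUFD
// sequence in the prologue builds that pairing from the canonical
// h[0..7] order; the PBLENDW/PSHUFD epilogue after the loop converts
// it back before the state is stored.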
109TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
110 MOVQ h+0(FP), DX
111 MOVQ message_base+8(FP), SI
112 MOVQ message_len+16(FP), DI
113 LEAQ -64(SI)(DI*1), DI
114 MOVOU (DX), X2
115 MOVOU 16(DX), X1
116 MOVO X2, X3
117 PUNPCKLLQ X1, X2
118 PUNPCKHLQ X1, X3
119 PSHUFD $0x27, X2, X2
120 PSHUFD $0x27, X3, X3
121 MOVO SHUF_MASK<>(SB), X15
122 LEAQ K<>(SB), BX
123
124 JMP TEST
125
126LOOP:
127 MOVO X2, X12
128 MOVO X3, X13
129
130 // load block and shuffle
131 MOVOU (SI), X4
132 MOVOU 16(SI), X5
133 MOVOU 32(SI), X6
134 MOVOU 48(SI), X7
135 PSHUFB X15, X4
136 PSHUFB X15, X5
137 PSHUFB X15, X6
138 PSHUFB X15, X7
139
140#define ROUND456 \
141 PADDL X5, X0 \
142 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
143 MOVO X5, X1 \
144 LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4
145 PADDL X1, X6 \
146 LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5
147 PSHUFD $0x4e, X0, X0 \
148 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
149 LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5
150
151#define ROUND567 \
152 PADDL X6, X0 \
153 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
154 MOVO X6, X1 \
155 LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4
156 PADDL X1, X7 \
157 LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6
158 PSHUFD $0x4e, X0, X0 \
159 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
160 LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6
161
162#define ROUND674 \
163 PADDL X7, X0 \
164 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
165 MOVO X7, X1 \
166 LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4
167 PADDL X1, X4 \
168 LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7
169 PSHUFD $0x4e, X0, X0 \
170 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
171 LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7
172
173#define ROUND745 \
174 PADDL X4, X0 \
175 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
176 MOVO X4, X1 \
177 LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4
178 PADDL X1, X5 \
179 LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4
180 PSHUFD $0x4e, X0, X0 \
181 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
182 LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4
183
184 // rounds 0-3
185 MOVO (BX), X0
186 PADDL X4, X0
187 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
188 PSHUFD $0x4e, X0, X0
189 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
190
191 // rounds 4-7
192 MOVO 1*16(BX), X0
193 PADDL X5, X0
194 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
195 PSHUFD $0x4e, X0, X0
196 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
197 LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5
198
199 // rounds 8-11
200 MOVO 2*16(BX), X0
201 PADDL X6, X0
202 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
203 PSHUFD $0x4e, X0, X0
204 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
205 LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6
206
207 MOVO 3*16(BX), X0; ROUND674 // rounds 12-15
208 MOVO 4*16(BX), X0; ROUND745 // rounds 16-19
209 MOVO 5*16(BX), X0; ROUND456 // rounds 20-23
210 MOVO 6*16(BX), X0; ROUND567 // rounds 24-27
211 MOVO 7*16(BX), X0; ROUND674 // rounds 28-31
212 MOVO 8*16(BX), X0; ROUND745 // rounds 32-35
213 MOVO 9*16(BX), X0; ROUND456 // rounds 36-39
214 MOVO 10*16(BX), X0; ROUND567 // rounds 40-43
215 MOVO 11*16(BX), X0; ROUND674 // rounds 44-47
216 MOVO 12*16(BX), X0; ROUND745 // rounds 48-51
217
218 // rounds 52-55
219 MOVO 13*16(BX), X0
220 PADDL X5, X0
221 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
222 MOVO X5, X1
223 LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4
224 PADDL X1, X6
225 LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5
226 PSHUFD $0x4e, X0, X0
227 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
228
229 // rounds 56-59
230 MOVO 14*16(BX), X0
231 PADDL X6, X0
232 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
233 MOVO X6, X1
234 LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4
235 PADDL X1, X7
236 LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6
237 PSHUFD $0x4e, X0, X0
238 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
239
240 // rounds 60-63
241 MOVO 15*16(BX), X0
242 PADDL X7, X0
243 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
244 PSHUFD $0x4e, X0, X0
245 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
246
247 PADDL X12, X2
248 PADDL X13, X3
249
250 ADDQ $64, SI
251
252TEST:
253 CMPQ SI, DI
254 JBE LOOP
255
256 PSHUFD $0x4e, X3, X0
257 LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0
258 PSHUFD $0x4e, X2, X1
259 LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f
260 PSHUFD $0x1b, X0, X0
261 PSHUFD $0x1b, X1, X1
262
263 MOVOU X0, (DX)
264 MOVOU X1, 16(DX)
265
266 RET
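The contract of blockIntelSha follows from the prologue above: h points at the eight-word state and message must be a whole number of 64-byte blocks (DI marks the start of the last block). That makes it straightforward to check against the FIPS 180-4 "abc" vector. A sketch of such a test, assuming it sits in this package and runs on a CPU with the SHA extensions:

package sha256

import "testing"

// Sketch: run blockIntelSha on the single padded block for "abc" and
// compare against the published SHA-256 test vector. The call faults
// on CPUs without the SHA extensions, so gate it appropriately.
func TestBlockIntelShaABC(t *testing.T) {
	h := [8]uint32{ // SHA-256 initial hash values
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
	}
	var msg [64]byte // "abc", then 0x80 padding, then the bit length (24)
	copy(msg[:], "abc")
	msg[3] = 0x80
	msg[63] = 24
	blockIntelSha(&h, msg[:])
	want := [8]uint32{
		0xba7816bf, 0x8f01cfea, 0x414140de, 0x5dae2223,
		0xb00361a3, 0x96177a9c, 0xb410ff61, 0xf20015ad,
	}
	if h != want {
		t.Fatalf("state = %08x, want %08x", h, want)
	}
}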
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
new file mode 100644
index 0000000..d4369e2
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
@@ -0,0 +1,37 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2016 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22func blockIntelShaGo(dig *digest, p []byte) {
23 panic("blockIntelShaGo called unexpectedly")
24}
25
26//go:noescape
27func blockArmSha2(h []uint32, message []uint8)
28
29func blockArmSha2Go(dig *digest, p []byte) {
30 // blockArmSha2 takes the state as a slice, so copy the digest state
31 // out, run the assembly block function, and copy the result back.
32 h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
33
34 blockArmSha2(h, p)
35
36 dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
37}
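Unlike the amd64 entry point, blockArmSha2 takes the state as a []uint32 slice, which is why the wrapper copies the digest state out and back instead of passing &dig.h. The same "abc" vector check applies through this signature; a sketch under the same assumptions (in-package test code, ARMv8 SHA2 support):

package sha256

import "testing"

// Sketch: the FIPS 180-4 "abc" vector through the slice-based ARM64
// entry point. Requires the ARMv8 SHA2 instructions.
func TestBlockArmSha2ABC(t *testing.T) {
	h := []uint32{
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
	}
	var msg [64]byte
	copy(msg[:], "abc")
	msg[3] = 0x80
	msg[63] = 24 // message length in bits, big-endian, in the last 8 bytes
	blockArmSha2(h, msg[:])
	want := []uint32{
		0xba7816bf, 0x8f01cfea, 0x414140de, 0x5dae2223,
		0xb00361a3, 0x96177a9c, 0xb410ff61, 0xf20015ad,
	}
	for i, w := range want {
		if h[i] != w {
			t.Fatalf("h[%d] = %08x, want %08x", i, h[i], w)
		}
	}
}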
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
new file mode 100644
index 0000000..7ab88b1
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
@@ -0,0 +1,192 @@
1// +build !noasm,!appengine,gc
2
3// ARM64 version of SHA256
4
5//
6// Minio Cloud Storage, (C) 2016 Minio, Inc.
7//
8// Licensed under the Apache License, Version 2.0 (the "License");
9// you may not use this file except in compliance with the License.
10// You may obtain a copy of the License at
11//
12// http://www.apache.org/licenses/LICENSE-2.0
13//
14// Unless required by applicable law or agreed to in writing, software
15// distributed under the License is distributed on an "AS IS" BASIS,
16// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17// See the License for the specific language governing permissions and
18// limitations under the License.
19//
20
21//
22// Based on implementation as found in https://github.com/jocover/sha256-armv8
23//
24// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to
25// their Plan9 equivalents
26//
27
28TEXT ·blockArmSha2(SB), 7, $0
29 MOVD h+0(FP), R0
30 MOVD message+24(FP), R1
31 MOVD message_len+32(FP), R2 // length of message
32 SUBS $64, R2
33 BMI complete
34
35 // Load constants table pointer
36 MOVD $·constants(SB), R3
37
38 // Cache constants table in registers v16 - v31
39 WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64
40 WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16
41 WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64
42
43 WORD $0x4c407801 // ld1 {v1.4s}, [x0]
44 WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64
45 WORD $0xd1004000 // sub x0, x0, #0x10
46 WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64
47
48loop:
49 // Main loop
50 WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64
51 WORD $0x4ea01c02 // mov v2.16b, v0.16b
52 WORD $0x4ea11c23 // mov v3.16b, v1.16b
53 WORD $0x6e2008a5 // rev32 v5.16b, v5.16b
54 WORD $0x6e2008c6 // rev32 v6.16b, v6.16b
55 WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s
56 WORD $0x6e2008e7 // rev32 v7.16b, v7.16b
57 WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s
58 WORD $0x4ea21c44 // mov v4.16b, v2.16b
59 WORD $0x5e094062 // sha256h q2, q3, v9.4s
60 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
61 WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
62 WORD $0x6e200908 // rev32 v8.16b, v8.16b
63 WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s
64 WORD $0x4ea21c44 // mov v4.16b, v2.16b
65 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
66 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
67 WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
68 WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
69 WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s
70 WORD $0x4ea21c44 // mov v4.16b, v2.16b
71 WORD $0x5e094062 // sha256h q2, q3, v9.4s
72 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
73 WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
74 WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
75 WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s
76 WORD $0x4ea21c44 // mov v4.16b, v2.16b
77 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
78 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
79 WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
80 WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
81 WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s
82 WORD $0x4ea21c44 // mov v4.16b, v2.16b
83 WORD $0x5e094062 // sha256h q2, q3, v9.4s
84 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
85 WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
86 WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
87 WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s
88 WORD $0x4ea21c44 // mov v4.16b, v2.16b
89 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
90 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
91 WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
92 WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
93 WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s
94 WORD $0x4ea21c44 // mov v4.16b, v2.16b
95 WORD $0x5e094062 // sha256h q2, q3, v9.4s
96 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
97 WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
98 WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
99 WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s
100 WORD $0x4ea21c44 // mov v4.16b, v2.16b
101 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
102 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
103 WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
104 WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
105 WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s
106 WORD $0x4ea21c44 // mov v4.16b, v2.16b
107 WORD $0x5e094062 // sha256h q2, q3, v9.4s
108 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
109 WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
110 WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
111 WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s
112 WORD $0x4ea21c44 // mov v4.16b, v2.16b
113 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
114 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
115 WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
116 WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
117 WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s
118 WORD $0x4ea21c44 // mov v4.16b, v2.16b
119 WORD $0x5e094062 // sha256h q2, q3, v9.4s
120 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
121 WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
122 WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
123 WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s
124 WORD $0x4ea21c44 // mov v4.16b, v2.16b
125 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
126 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
127 WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
128 WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
129 WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s
130 WORD $0x4ea21c44 // mov v4.16b, v2.16b
131 WORD $0x5e094062 // sha256h q2, q3, v9.4s
132 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
133 WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
134 WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s
135 WORD $0x4ea21c44 // mov v4.16b, v2.16b
136 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
137 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
138 WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s
139 WORD $0x4ea21c44 // mov v4.16b, v2.16b
140 WORD $0x5e094062 // sha256h q2, q3, v9.4s
141 WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
142 WORD $0x4ea21c44 // mov v4.16b, v2.16b
143 WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
144 WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
145 WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s
146 WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s
147
148 SUBS $64, R2
149 BPL loop
150
151 // Store result
152 WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0]
153
154complete:
155 RET
156
157// Constants table
158DATA ·constants+0x0(SB)/8, $0x71374491428a2f98
159DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf
160DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b
161DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4
162DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98
163DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be
164DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74
165DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7
166DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1
167DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6
168DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f
169DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc
170DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152
171DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8
172DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3
173DATA ·constants+0x78(SB)/8, $0x1429296706ca6351
174DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85
175DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc
176DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354
177DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e
178DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1
179DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70
180DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819
181DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585
182DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116
183DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c
184DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3
185DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f
186DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee
187DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814
188DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa
189DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7
190
191GLOBL ·constants(SB), 8, $256
192
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go
new file mode 100644
index 0000000..94d7eb0
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go
@@ -0,0 +1,29 @@
1//go:build appengine || noasm || (!amd64 && !arm64) || !gc
2// +build appengine noasm !amd64,!arm64 !gc
3
4/*
5 * Minio Cloud Storage, (C) 2019 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22func blockIntelShaGo(dig *digest, p []byte) {
23 panic("blockIntelShaGo called unexpectedly")
24
25}
26
27func blockArmSha2Go(dig *digest, p []byte) {
28 panic("blockArmSha2Go called unexpectedly")
29}
diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh
new file mode 100644
index 0000000..50150ea
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/test-architectures.sh
@@ -0,0 +1,15 @@
1#!/bin/sh
2
3set -e
4
5go tool dist list | while IFS=/ read -r os arch; do
6 echo "Checking $os/$arch..."
7 echo " normal"
8 GOARCH=$arch GOOS=$os go build -o /dev/null ./...
9 echo " noasm"
10 GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./...
11 echo " appengine"
12 GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./...
13 echo " noasm,appengine"
14 GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./...
15done